author    Ben Noordhuis <info@bnoordhuis.nl>  2013-11-10 02:02:27 +0100
committer Ben Noordhuis <info@bnoordhuis.nl>  2013-11-11 02:40:36 +0100
commit    f230a1cf749e984439b5bb9729d9db9f48472827 (patch)
tree      153596de2251b717ad79823f23fabf4c140d6d35 /deps/v8
parent    a12870c823b9b67110b27a470fcac342cf1dfbd6 (diff)
v8: upgrade to 3.22.24
This commit removes the simple/test-event-emitter-memory-leak test for being unreliable with the new garbage collector: the memory pressure exerted by the test case is too low for the garbage collector to kick in. It can be made to work again by limiting the heap size with the --max_old_space_size=x flag but that won't be very reliable across platforms and architectures.
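(For illustration: the flag named above can also be set through V8's embedder API. A minimal sketch, assuming the V8::SetFlagsFromString entry point of this V8 line; the 16 MB cap and the surrounding scaffolding are illustrative, not part of this commit.)

    #include "v8.h"

    int main() {
      // Equivalent to passing --max_old_space_size=16 on the command line:
      // cap old space so the collector kicks in under light memory pressure.
      const char flags[] = "--max_old_space_size=16";
      v8::V8::SetFlagsFromString(flags, static_cast<int>(sizeof(flags) - 1));
      // ... create an isolate/context and run the leak-prone workload ...
      return 0;
    }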
Diffstat (limited to 'deps/v8')
-rw-r--r-- deps/v8/.gitignore | 8
-rw-r--r-- deps/v8/ChangeLog | 223
-rw-r--r-- deps/v8/Makefile | 23
-rw-r--r-- deps/v8/Makefile.nacl | 3
-rw-r--r-- deps/v8/OWNERS | 4
-rw-r--r-- deps/v8/PRESUBMIT.py | 18
-rw-r--r-- [-rwxr-xr-x] deps/v8/WATCHLISTS (renamed from deps/v8/tools/status-file-converter.py) | 27
-rw-r--r-- deps/v8/benchmarks/deltablue.js | 26
-rw-r--r-- deps/v8/build/all.gyp | 1
-rw-r--r-- deps/v8/build/features.gypi | 16
-rw-r--r-- deps/v8/build/standalone.gypi | 19
-rw-r--r-- deps/v8/build/toolchain.gypi | 27
-rwxr-xr-x deps/v8/include/v8-debug.h | 4
-rw-r--r-- deps/v8/include/v8-defaults.h | 54
-rw-r--r-- deps/v8/include/v8-preparser.h | 84
-rw-r--r-- deps/v8/include/v8-profiler.h | 25
-rw-r--r-- deps/v8/include/v8-testing.h | 4
-rw-r--r-- deps/v8/include/v8.h | 518
-rw-r--r-- deps/v8/include/v8config.h | 15
-rw-r--r-- deps/v8/preparser/preparser-process.cc | 372
-rw-r--r-- deps/v8/preparser/preparser.gyp | 58
-rw-r--r-- deps/v8/samples/lineprocessor.cc | 10
-rw-r--r-- deps/v8/samples/samples.gyp | 8
-rw-r--r-- deps/v8/samples/shell.cc | 18
-rw-r--r-- deps/v8/src/OWNERS | 2
-rw-r--r-- deps/v8/src/accessors.cc | 55
-rw-r--r-- deps/v8/src/accessors.h | 7
-rw-r--r-- deps/v8/src/allocation-site-scopes.cc | 108
-rw-r--r-- deps/v8/src/allocation-site-scopes.h | 115
-rw-r--r-- deps/v8/src/allocation-tracker.cc | 279
-rw-r--r-- deps/v8/src/allocation-tracker.h | 138
-rw-r--r-- deps/v8/src/api.cc | 877
-rw-r--r-- deps/v8/src/api.h | 26
-rw-r--r-- deps/v8/src/apinatives.js | 1
-rw-r--r-- deps/v8/src/arguments.cc | 13
-rw-r--r-- deps/v8/src/arguments.h | 18
-rw-r--r-- deps/v8/src/arm/assembler-arm-inl.h | 7
-rw-r--r-- deps/v8/src/arm/assembler-arm.cc | 1
-rw-r--r-- deps/v8/src/arm/assembler-arm.h | 81
-rw-r--r-- deps/v8/src/arm/builtins-arm.cc | 116
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.cc | 1346
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.h | 26
-rw-r--r-- deps/v8/src/arm/codegen-arm.cc | 115
-rw-r--r-- deps/v8/src/arm/codegen-arm.h | 1
-rw-r--r-- deps/v8/src/arm/deoptimizer-arm.cc | 107
-rw-r--r-- deps/v8/src/arm/frames-arm.h | 2
-rw-r--r-- deps/v8/src/arm/full-codegen-arm.cc | 171
-rw-r--r-- deps/v8/src/arm/ic-arm.cc | 6
-rw-r--r-- deps/v8/src/arm/lithium-arm.cc | 423
-rw-r--r-- deps/v8/src/arm/lithium-arm.h | 462
-rw-r--r-- deps/v8/src/arm/lithium-codegen-arm.cc | 671
-rw-r--r-- deps/v8/src/arm/lithium-codegen-arm.h | 68
-rw-r--r-- deps/v8/src/arm/lithium-gap-resolver-arm.cc | 4
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.cc | 266
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.h | 72
-rw-r--r-- deps/v8/src/arm/regexp-macro-assembler-arm.h | 5
-rw-r--r-- deps/v8/src/arm/simulator-arm.cc | 79
-rw-r--r-- deps/v8/src/arm/simulator-arm.h | 9
-rw-r--r-- deps/v8/src/arm/stub-cache-arm.cc | 242
-rw-r--r-- deps/v8/src/array-iterator.js | 8
-rw-r--r-- deps/v8/src/array.js | 26
-rw-r--r-- deps/v8/src/arraybuffer.js | 8
-rw-r--r-- deps/v8/src/assembler.cc | 42
-rw-r--r-- deps/v8/src/assembler.h | 18
-rw-r--r-- deps/v8/src/ast.cc | 61
-rw-r--r-- deps/v8/src/ast.h | 534
-rw-r--r-- deps/v8/src/bootstrapper.cc | 9
-rw-r--r-- deps/v8/src/builtins.cc | 122
-rw-r--r-- deps/v8/src/builtins.h | 52
-rw-r--r-- deps/v8/src/checks.cc | 46
-rw-r--r-- deps/v8/src/checks.h | 19
-rw-r--r-- deps/v8/src/code-stubs-hydrogen.cc | 292
-rw-r--r-- deps/v8/src/code-stubs.cc | 552
-rw-r--r-- deps/v8/src/code-stubs.h | 326
-rw-r--r-- deps/v8/src/codegen.cc | 14
-rw-r--r-- deps/v8/src/compiler.cc | 252
-rw-r--r-- deps/v8/src/compiler.h | 31
-rw-r--r-- deps/v8/src/contexts.cc | 2
-rw-r--r-- deps/v8/src/conversions-inl.h | 4
-rw-r--r-- deps/v8/src/conversions.cc | 16
-rw-r--r-- deps/v8/src/counters.h | 33
-rw-r--r-- deps/v8/src/cpu-profiler.cc | 30
-rw-r--r-- deps/v8/src/cpu-profiler.h | 2
-rw-r--r-- deps/v8/src/d8-debug.cc | 2
-rw-r--r-- deps/v8/src/d8-posix.cc | 51
-rw-r--r-- deps/v8/src/d8-readline.cc | 5
-rw-r--r-- deps/v8/src/d8.cc | 46
-rw-r--r-- deps/v8/src/d8.gyp | 8
-rw-r--r-- deps/v8/src/d8.h | 5
-rw-r--r-- deps/v8/src/d8.js | 14
-rw-r--r-- deps/v8/src/date.js | 32
-rw-r--r-- deps/v8/src/debug-debugger.js | 4
-rw-r--r-- deps/v8/src/debug.cc | 14
-rw-r--r-- deps/v8/src/debug.h | 1
-rw-r--r-- deps/v8/src/defaults.cc (renamed from deps/v8/src/marking-thread.cc) | 79
-rw-r--r-- deps/v8/src/deoptimizer.cc | 175
-rw-r--r-- deps/v8/src/deoptimizer.h | 111
-rw-r--r-- deps/v8/src/disassembler.cc | 2
-rw-r--r-- deps/v8/src/elements.cc | 2
-rw-r--r-- deps/v8/src/extensions/externalize-string-extension.cc | 11
-rw-r--r-- deps/v8/src/factory.cc | 142
-rw-r--r-- deps/v8/src/factory.h | 35
-rw-r--r-- deps/v8/src/flag-definitions.h | 168
-rw-r--r-- deps/v8/src/flags.cc | 37
-rw-r--r-- deps/v8/src/frames.h | 7
-rw-r--r-- deps/v8/src/full-codegen.cc | 160
-rw-r--r-- deps/v8/src/full-codegen.h | 148
-rw-r--r-- deps/v8/src/global-handles.cc | 2
-rw-r--r-- deps/v8/src/globals.h | 6
-rw-r--r-- deps/v8/src/handles-inl.h | 11
-rw-r--r-- deps/v8/src/handles.cc | 85
-rw-r--r-- deps/v8/src/handles.h | 22
-rw-r--r-- deps/v8/src/harmony-math.js | 60
-rw-r--r-- deps/v8/src/heap-inl.h | 76
-rw-r--r-- deps/v8/src/heap-profiler.cc | 80
-rw-r--r-- deps/v8/src/heap-profiler.h | 33
-rw-r--r-- deps/v8/src/heap-snapshot-generator.cc | 372
-rw-r--r-- deps/v8/src/heap-snapshot-generator.h | 53
-rw-r--r-- deps/v8/src/heap.cc | 684
-rw-r--r-- deps/v8/src/heap.h | 286
-rw-r--r-- deps/v8/src/hydrogen-alias-analysis.h | 9
-rw-r--r-- deps/v8/src/hydrogen-canonicalize.cc | 8
-rw-r--r-- deps/v8/src/hydrogen-check-elimination.cc | 357
-rw-r--r-- deps/v8/src/hydrogen-check-elimination.h (renamed from deps/v8/src/marking-thread.h) | 36
-rw-r--r-- deps/v8/src/hydrogen-dce.cc | 74
-rw-r--r-- deps/v8/src/hydrogen-dce.h | 3
-rw-r--r-- deps/v8/src/hydrogen-deoptimizing-mark.cc | 126
-rw-r--r-- deps/v8/src/hydrogen-escape-analysis.cc | 16
-rw-r--r-- deps/v8/src/hydrogen-flow-engine.h | 235
-rw-r--r-- deps/v8/src/hydrogen-gvn.cc | 40
-rw-r--r-- deps/v8/src/hydrogen-instructions.cc | 288
-rw-r--r-- deps/v8/src/hydrogen-instructions.h | 1559
-rw-r--r-- deps/v8/src/hydrogen-load-elimination.cc | 510
-rw-r--r-- deps/v8/src/hydrogen-load-elimination.h | 50
-rw-r--r-- deps/v8/src/hydrogen-mark-unreachable.cc | 77
-rw-r--r-- deps/v8/src/hydrogen-mark-unreachable.h (renamed from deps/v8/src/hydrogen-deoptimizing-mark.h) | 19
-rw-r--r-- deps/v8/src/hydrogen-osr.cc | 27
-rw-r--r-- deps/v8/src/hydrogen-osr.h | 6
-rw-r--r-- deps/v8/src/hydrogen-redundant-phi.cc | 70
-rw-r--r-- deps/v8/src/hydrogen-redundant-phi.h | 3
-rw-r--r-- deps/v8/src/hydrogen-representation-changes.cc | 5
-rw-r--r-- deps/v8/src/hydrogen-uint32-analysis.cc | 13
-rw-r--r-- deps/v8/src/hydrogen.cc | 2104
-rw-r--r-- deps/v8/src/hydrogen.h | 456
-rw-r--r-- deps/v8/src/i18n.cc | 12
-rw-r--r-- deps/v8/src/i18n.js | 70
-rw-r--r-- deps/v8/src/ia32/assembler-ia32-inl.h | 11
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.cc | 91
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.h | 50
-rw-r--r-- deps/v8/src/ia32/builtins-ia32.cc | 93
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.cc | 1699
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.h | 28
-rw-r--r-- deps/v8/src/ia32/codegen-ia32.cc | 59
-rw-r--r-- deps/v8/src/ia32/deoptimizer-ia32.cc | 95
-rw-r--r-- deps/v8/src/ia32/disasm-ia32.cc | 16
-rw-r--r-- deps/v8/src/ia32/full-codegen-ia32.cc | 96
-rw-r--r-- deps/v8/src/ia32/ic-ia32.cc | 4
-rw-r--r-- deps/v8/src/ia32/lithium-codegen-ia32.cc | 562
-rw-r--r-- deps/v8/src/ia32/lithium-codegen-ia32.h | 69
-rw-r--r-- deps/v8/src/ia32/lithium-gap-resolver-ia32.cc | 20
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.cc | 266
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.h | 98
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.cc | 241
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.h | 56
-rw-r--r-- deps/v8/src/ia32/stub-cache-ia32.cc | 243
-rw-r--r-- deps/v8/src/ic-inl.h | 22
-rw-r--r-- deps/v8/src/ic.cc | 1803
-rw-r--r-- deps/v8/src/ic.h | 356
-rw-r--r-- deps/v8/src/incremental-marking.cc | 4
-rw-r--r-- deps/v8/src/isolate-inl.h | 5
-rw-r--r-- deps/v8/src/isolate.cc | 67
-rw-r--r-- deps/v8/src/isolate.h | 71
-rw-r--r-- deps/v8/src/json.js | 4
-rw-r--r-- deps/v8/src/list.h | 2
-rw-r--r-- deps/v8/src/lithium-allocator-inl.h | 10
-rw-r--r-- deps/v8/src/lithium-allocator.cc | 47
-rw-r--r-- deps/v8/src/lithium-allocator.h | 15
-rw-r--r-- deps/v8/src/lithium-codegen.cc | 150
-rw-r--r-- deps/v8/src/lithium-codegen.h | 96
-rw-r--r-- deps/v8/src/lithium.cc | 10
-rw-r--r-- deps/v8/src/lithium.h | 5
-rw-r--r-- deps/v8/src/liveedit-debugger.js | 76
-rw-r--r-- deps/v8/src/liveedit.cc | 26
-rw-r--r-- deps/v8/src/log.cc | 23
-rw-r--r-- deps/v8/src/log.h | 5
-rw-r--r-- deps/v8/src/macros.py | 5
-rw-r--r-- deps/v8/src/mark-compact.cc | 155
-rw-r--r-- deps/v8/src/mark-compact.h | 11
-rw-r--r-- deps/v8/src/math.js | 65
-rw-r--r-- deps/v8/src/messages.js | 20
-rw-r--r-- deps/v8/src/mips/assembler-mips-inl.h | 7
-rw-r--r-- deps/v8/src/mips/assembler-mips.cc | 1
-rw-r--r-- deps/v8/src/mips/assembler-mips.h | 44
-rw-r--r-- deps/v8/src/mips/builtins-mips.cc | 98
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.cc | 1185
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.h | 41
-rw-r--r-- deps/v8/src/mips/codegen-mips.cc | 75
-rw-r--r-- deps/v8/src/mips/codegen-mips.h | 1
-rw-r--r-- deps/v8/src/mips/deoptimizer-mips.cc | 87
-rw-r--r-- deps/v8/src/mips/full-codegen-mips.cc | 97
-rw-r--r-- deps/v8/src/mips/ic-mips.cc | 4
-rw-r--r-- deps/v8/src/mips/lithium-codegen-mips.cc | 649
-rw-r--r-- deps/v8/src/mips/lithium-codegen-mips.h | 66
-rw-r--r-- deps/v8/src/mips/lithium-mips.cc | 454
-rw-r--r-- deps/v8/src/mips/lithium-mips.h | 454
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.cc | 202
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.h | 61
-rw-r--r-- deps/v8/src/mips/regexp-macro-assembler-mips.cc | 81
-rw-r--r-- deps/v8/src/mips/regexp-macro-assembler-mips.h | 8
-rw-r--r-- deps/v8/src/mips/simulator-mips.cc | 10
-rw-r--r-- deps/v8/src/mips/stub-cache-mips.cc | 209
-rw-r--r-- deps/v8/src/mirror-debugger.js | 8
-rw-r--r-- deps/v8/src/mksnapshot.cc | 49
-rw-r--r-- deps/v8/src/msan.h | 49
-rw-r--r-- deps/v8/src/object-observe.js | 13
-rw-r--r-- deps/v8/src/objects-debug.cc | 45
-rw-r--r-- deps/v8/src/objects-inl.h | 323
-rw-r--r-- deps/v8/src/objects-printer.cc | 28
-rw-r--r-- deps/v8/src/objects-visiting-inl.h | 24
-rw-r--r-- deps/v8/src/objects-visiting.cc | 3
-rw-r--r-- deps/v8/src/objects-visiting.h | 2
-rw-r--r-- deps/v8/src/objects.cc | 3066
-rw-r--r-- deps/v8/src/objects.h | 801
-rw-r--r-- deps/v8/src/optimizing-compiler-thread.cc | 254
-rw-r--r-- deps/v8/src/optimizing-compiler-thread.h | 90
-rw-r--r-- deps/v8/src/parser.cc | 778
-rw-r--r-- deps/v8/src/parser.h | 93
-rw-r--r-- deps/v8/src/platform-cygwin.cc | 12
-rw-r--r-- deps/v8/src/platform-freebsd.cc | 11
-rw-r--r-- deps/v8/src/platform-linux.cc | 24
-rw-r--r-- deps/v8/src/platform-macos.cc | 28
-rw-r--r-- deps/v8/src/platform-openbsd.cc | 35
-rw-r--r-- deps/v8/src/platform-posix.cc | 44
-rw-r--r-- deps/v8/src/platform-posix.h | 106
-rw-r--r-- deps/v8/src/platform-solaris.cc | 20
-rw-r--r-- deps/v8/src/platform-win32.cc | 197
-rw-r--r-- deps/v8/src/platform.h | 12
-rw-r--r-- deps/v8/src/platform/elapsed-timer.h | 6
-rw-r--r-- deps/v8/src/platform/mutex.h | 4
-rw-r--r-- deps/v8/src/platform/semaphore.h | 4
-rw-r--r-- deps/v8/src/platform/time.cc | 134
-rw-r--r-- deps/v8/src/platform/time.h | 7
-rw-r--r-- deps/v8/src/preparser-api.cc | 196
-rw-r--r-- deps/v8/src/preparser.cc | 865
-rw-r--r-- deps/v8/src/preparser.h | 404
-rw-r--r-- deps/v8/src/prettyprinter.cc | 68
-rw-r--r-- deps/v8/src/profile-generator-inl.h | 33
-rw-r--r-- deps/v8/src/profile-generator.cc | 100
-rw-r--r-- deps/v8/src/profile-generator.h | 69
-rw-r--r-- deps/v8/src/property-details.h | 5
-rw-r--r-- deps/v8/src/proxy.js | 4
-rw-r--r-- deps/v8/src/regexp.js | 8
-rw-r--r-- deps/v8/src/rewriter.cc | 12
-rw-r--r-- deps/v8/src/runtime-profiler.cc | 3
-rw-r--r-- deps/v8/src/runtime.cc | 540
-rw-r--r-- deps/v8/src/runtime.h | 6
-rw-r--r-- deps/v8/src/runtime.js | 8
-rw-r--r-- deps/v8/src/sampler.cc | 6
-rw-r--r-- deps/v8/src/scanner.cc | 140
-rw-r--r-- deps/v8/src/scanner.h | 55
-rw-r--r-- deps/v8/src/scopeinfo.cc | 14
-rw-r--r-- deps/v8/src/scopes.cc | 4
-rw-r--r-- deps/v8/src/serialize.cc | 70
-rw-r--r-- deps/v8/src/serialize.h | 14
-rw-r--r-- deps/v8/src/snapshot-common.cc | 11
-rw-r--r-- deps/v8/src/spaces-inl.h | 38
-rw-r--r-- deps/v8/src/spaces.cc | 110
-rw-r--r-- deps/v8/src/spaces.h | 115
-rw-r--r-- deps/v8/src/store-buffer-inl.h | 1
-rw-r--r-- deps/v8/src/string.js | 9
-rw-r--r-- deps/v8/src/stub-cache.cc | 697
-rw-r--r-- deps/v8/src/stub-cache.h | 367
-rw-r--r-- deps/v8/src/type-info.cc | 80
-rw-r--r-- deps/v8/src/type-info.h | 7
-rw-r--r-- deps/v8/src/typedarray.js | 8
-rw-r--r-- deps/v8/src/types.cc | 21
-rw-r--r-- deps/v8/src/types.h | 36
-rw-r--r-- deps/v8/src/typing.cc | 43
-rw-r--r-- deps/v8/src/unicode.h | 2
-rw-r--r-- deps/v8/src/unique.h | 102
-rw-r--r-- deps/v8/src/utils.h | 4
-rw-r--r-- deps/v8/src/utils/random-number-generator.cc | 19
-rw-r--r-- deps/v8/src/utils/random-number-generator.h | 4
-rw-r--r-- deps/v8/src/v8-counters.cc | 8
-rw-r--r-- deps/v8/src/v8-counters.h | 36
-rw-r--r-- deps/v8/src/v8.cc | 13
-rw-r--r-- deps/v8/src/v8natives.js | 67
-rw-r--r-- deps/v8/src/v8threads.cc | 20
-rw-r--r-- deps/v8/src/v8utils.h | 55
-rw-r--r-- deps/v8/src/version.cc | 6
-rw-r--r-- deps/v8/src/win32-math.cc | 2
-rw-r--r-- deps/v8/src/win32-math.h | 4
-rw-r--r-- deps/v8/src/x64/assembler-x64-inl.h | 16
-rw-r--r-- deps/v8/src/x64/assembler-x64.cc | 84
-rw-r--r-- deps/v8/src/x64/assembler-x64.h | 99
-rw-r--r-- deps/v8/src/x64/builtins-x64.cc | 94
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.cc | 1026
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.h | 28
-rw-r--r-- deps/v8/src/x64/codegen-x64.cc | 32
-rw-r--r-- deps/v8/src/x64/codegen-x64.h | 2
-rw-r--r-- deps/v8/src/x64/deoptimizer-x64.cc | 86
-rw-r--r-- deps/v8/src/x64/disasm-x64.cc | 47
-rw-r--r-- deps/v8/src/x64/frames-x64.h | 6
-rw-r--r-- deps/v8/src/x64/full-codegen-x64.cc | 107
-rw-r--r-- deps/v8/src/x64/ic-x64.cc | 4
-rw-r--r-- deps/v8/src/x64/lithium-codegen-x64.cc | 458
-rw-r--r-- deps/v8/src/x64/lithium-codegen-x64.h | 60
-rw-r--r-- deps/v8/src/x64/lithium-gap-resolver-x64.cc | 6
-rw-r--r-- deps/v8/src/x64/lithium-x64.cc | 249
-rw-r--r-- deps/v8/src/x64/lithium-x64.h | 100
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.cc | 494
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.h | 118
-rw-r--r-- deps/v8/src/x64/stub-cache-x64.cc | 303
-rw-r--r-- deps/v8/test/benchmarks/benchmarks.status | 8
-rw-r--r-- deps/v8/test/benchmarks/testcfg.py | 2
-rw-r--r-- deps/v8/test/cctest/cctest.cc | 75
-rw-r--r-- deps/v8/test/cctest/cctest.gyp | 5
-rw-r--r-- deps/v8/test/cctest/cctest.h | 136
-rw-r--r-- deps/v8/test/cctest/cctest.status | 173
-rw-r--r-- deps/v8/test/cctest/test-accessors.cc | 34
-rw-r--r-- deps/v8/test/cctest/test-alloc.cc | 20
-rw-r--r-- deps/v8/test/cctest/test-api.cc | 1997
-rw-r--r-- deps/v8/test/cctest/test-assembler-arm.cc | 40
-rw-r--r-- deps/v8/test/cctest/test-assembler-ia32.cc | 45
-rw-r--r-- deps/v8/test/cctest/test-assembler-mips.cc | 62
-rw-r--r-- deps/v8/test/cctest/test-assembler-x64.cc | 255
-rw-r--r-- deps/v8/test/cctest/test-ast.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-code-stubs-arm.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-compiler.cc | 37
-rw-r--r-- deps/v8/test/cctest/test-constantpool.cc | 50
-rw-r--r-- deps/v8/test/cctest/test-cpu-profiler.cc | 132
-rw-r--r-- deps/v8/test/cctest/test-dataflow.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-date.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-debug.cc | 369
-rw-r--r-- deps/v8/test/cctest/test-declarative-accessors.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-decls.cc | 76
-rw-r--r-- deps/v8/test/cctest/test-deoptimization.cc | 50
-rw-r--r-- deps/v8/test/cctest/test-dictionary.cc | 31
-rw-r--r-- deps/v8/test/cctest/test-disasm-arm.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-disasm-ia32.cc | 55
-rw-r--r-- deps/v8/test/cctest/test-disasm-mips.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-disasm-x64.cc | 97
-rw-r--r-- deps/v8/test/cctest/test-flags.cc | 35
-rw-r--r-- deps/v8/test/cctest/test-func-name-inference.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-global-handles.cc | 40
-rw-r--r-- deps/v8/test/cctest/test-global-object.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-hashing.cc | 10
-rw-r--r-- deps/v8/test/cctest/test-heap-profiler.cc | 192
-rw-r--r-- deps/v8/test/cctest/test-heap.cc | 752
-rw-r--r-- deps/v8/test/cctest/test-liveedit.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-lockers.cc | 37
-rw-r--r-- deps/v8/test/cctest/test-log-stack-tracer.cc | 57
-rw-r--r-- deps/v8/test/cctest/test-log.cc | 33
-rw-r--r-- deps/v8/test/cctest/test-macro-assembler-arm.cc | 136
-rw-r--r-- deps/v8/test/cctest/test-macro-assembler-mips.cc | 136
-rw-r--r-- deps/v8/test/cctest/test-macro-assembler-x64.cc | 282
-rw-r--r-- deps/v8/test/cctest/test-mark-compact.cc | 157
-rw-r--r-- deps/v8/test/cctest/test-object-observe.cc | 142
-rw-r--r-- deps/v8/test/cctest/test-parsing.cc | 174
-rw-r--r-- deps/v8/test/cctest/test-platform.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-profile-generator.cc | 16
-rw-r--r-- deps/v8/test/cctest/test-random.cc | 6
-rw-r--r-- deps/v8/test/cctest/test-regexp.cc | 87
-rw-r--r-- deps/v8/test/cctest/test-serialize.cc | 42
-rw-r--r-- deps/v8/test/cctest/test-spaces.cc | 38
-rw-r--r-- deps/v8/test/cctest/test-strings.cc | 42
-rw-r--r-- deps/v8/test/cctest/test-symbols.cc | 6
-rw-r--r-- deps/v8/test/cctest/test-thread-termination.cc | 72
-rw-r--r-- deps/v8/test/cctest/test-threads.cc | 18
-rw-r--r-- deps/v8/test/cctest/test-time.cc | 53
-rw-r--r-- deps/v8/test/cctest/test-types.cc | 151
-rw-r--r-- deps/v8/test/cctest/test-unique.cc | 237
-rw-r--r-- deps/v8/test/cctest/test-utils.cc | 25
-rw-r--r-- deps/v8/test/intl/OWNERS | 2
-rw-r--r-- deps/v8/test/intl/date-format/parse-MMMdy.js | 15
-rw-r--r-- deps/v8/test/intl/date-format/parse-mdy.js | 32
-rw-r--r-- deps/v8/test/intl/date-format/parse-mdyhms.js | 22
-rw-r--r-- deps/v8/test/intl/date-format/timezone-name.js | 53
-rw-r--r-- deps/v8/test/intl/intl.status | 19
-rw-r--r-- deps/v8/test/intl/testcfg.py | 1
-rw-r--r-- deps/v8/test/message/message.status | 10
-rw-r--r-- deps/v8/test/message/paren_in_arg_string.out | 4
-rw-r--r-- deps/v8/test/message/testcfg.py | 2
-rw-r--r-- deps/v8/test/mjsunit/allocation-site-info.js | 138
-rw-r--r-- deps/v8/test/mjsunit/array-functions-prototype-misc.js | 4
-rw-r--r-- deps/v8/test/mjsunit/array-literal-feedback.js | 51
-rw-r--r-- deps/v8/test/mjsunit/big-array-literal.js | 9
-rw-r--r-- deps/v8/test/mjsunit/big-object-literal.js | 9
-rw-r--r-- deps/v8/test/mjsunit/bitwise-operations-bools.js | 94
-rw-r--r-- deps/v8/test/mjsunit/compare-known-objects.js | 65
-rw-r--r-- deps/v8/test/mjsunit/compare-objects.js | 108
-rw-r--r-- deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js (renamed from deps/v8/test/mjsunit/parallel-invalidate-transition-map.js) | 10
-rw-r--r-- deps/v8/test/mjsunit/compiler/concurrent-proto-change.js (renamed from deps/v8/test/mjsunit/compiler/parallel-proto-change.js) | 12
-rw-r--r-- deps/v8/test/mjsunit/compiler/escape-analysis-representation.js | 73
-rw-r--r-- deps/v8/test/mjsunit/compiler/escape-analysis.js | 70
-rw-r--r-- deps/v8/test/mjsunit/compiler/expression-trees.js | 33
-rw-r--r-- deps/v8/test/mjsunit/compiler/load-elimination-global.js | 196
-rw-r--r-- deps/v8/test/mjsunit/compiler/load-elimination-osr.js | 65
-rw-r--r-- deps/v8/test/mjsunit/compiler/load-elimination.js | 106
-rw-r--r-- deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js (renamed from deps/v8/test/mjsunit/manual-parallel-recompile.js) | 11
-rw-r--r-- deps/v8/test/mjsunit/compiler/osr-alignment.js | 86
-rw-r--r-- deps/v8/test/mjsunit/compiler/rotate.js | 86
-rw-r--r-- deps/v8/test/mjsunit/concurrent-initial-prototype-change.js (renamed from deps/v8/test/mjsunit/parallel-initial-prototype-change.js) | 11
-rw-r--r-- deps/v8/test/mjsunit/d8-performance-now.js | 62
-rw-r--r-- deps/v8/test/mjsunit/debug-liveedit-4.js | 69
-rw-r--r-- deps/v8/test/mjsunit/debug-stepin-function-call.js | 15
-rw-r--r-- deps/v8/test/mjsunit/div-mul-minus-one.js | 53
-rw-r--r-- deps/v8/test/mjsunit/fast-prototype.js | 4
-rw-r--r-- deps/v8/test/mjsunit/harmony/math-sign.js | 48
-rw-r--r-- deps/v8/test/mjsunit/harmony/math-trunc.js | 51
-rw-r--r-- deps/v8/test/mjsunit/harmony/object-observe.js | 118
-rw-r--r-- deps/v8/test/mjsunit/harmony/typedarrays.js | 2
-rw-r--r-- deps/v8/test/mjsunit/lithium/DivI.js | 57
-rw-r--r-- deps/v8/test/mjsunit/lithium/MathExp.js (renamed from deps/v8/test/intl/date-format/utils.js) | 17
-rw-r--r-- deps/v8/test/mjsunit/lithium/SeqStringSetChar.js | 46
-rw-r--r-- deps/v8/test/mjsunit/lithium/StoreKeyed.js | 61
-rw-r--r-- deps/v8/test/mjsunit/lithium/StoreKeyedExternal.js | 109
-rw-r--r-- deps/v8/test/mjsunit/mjsunit.status | 444
-rw-r--r-- deps/v8/test/mjsunit/number-tostring-add.js | 89
-rw-r--r-- deps/v8/test/mjsunit/number-tostring-func.js | 367
-rw-r--r-- deps/v8/test/mjsunit/opt-elements-kind.js | 5
-rw-r--r-- deps/v8/test/mjsunit/osr-elements-kind.js | 5
-rw-r--r-- deps/v8/test/mjsunit/regexp-global.js | 2
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-1713.js | 127
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-1713b.js | 126
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-2612.js | 5
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-2931.js | 34
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-add-minus-zero.js | 38
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-array-pop-nonconfigurable.js | 31
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-binop-nosse2.js | 168
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-binop.js | 181
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-compare-constant-doubles.js | 58
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-305309.js | 49
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-306851.js | 52
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-309623.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-84186.js) | 31
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js | 10
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js | 8
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-parse-object-literal.js | 29
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-parse-use-strict.js | 42
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-polymorphic-load.js (renamed from deps/v8/src/v8preparserdll-main.cc) | 26
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js | 11
-rw-r--r-- deps/v8/test/mjsunit/unbox-double-arrays.js | 2
-rw-r--r-- deps/v8/test/mozilla/mozilla.status | 1343
-rw-r--r-- deps/v8/test/preparser/preparser.status | 24
-rw-r--r-- deps/v8/test/preparser/strict-identifiers.pyt | 8
-rw-r--r-- deps/v8/test/preparser/testcfg.py | 8
-rw-r--r-- deps/v8/test/test262/test262.status | 133
-rw-r--r-- deps/v8/test/test262/testcfg.py | 7
-rw-r--r-- deps/v8/test/webkit/webkit.status | 17
-rwxr-xr-x deps/v8/tools/android-sync.sh | 3
-rw-r--r-- deps/v8/tools/gen-postmortem-metadata.py | 12
-rwxr-xr-x deps/v8/tools/grokdump.py | 13
-rw-r--r-- deps/v8/tools/gyp/v8.gyp | 33
-rwxr-xr-x deps/v8/tools/merge-to-branch.sh | 3
-rwxr-xr-x deps/v8/tools/presubmit.py | 8
-rw-r--r-- deps/v8/tools/profviz/composer.js | 2
-rwxr-xr-x deps/v8/tools/push-to-trunk.sh | 3
-rwxr-xr-x deps/v8/tools/run-deopt-fuzzer.py | 4
-rwxr-xr-x deps/v8/tools/run-tests.py | 27
-rw-r--r-- deps/v8/tools/sodium/index.html | 36
-rw-r--r-- deps/v8/tools/sodium/sodium.js | 409
-rwxr-xr-x deps/v8/tools/sodium/styles.css | 70
-rwxr-xr-x deps/v8/tools/test-push-to-trunk.sh | 246
-rw-r--r-- deps/v8/tools/testrunner/README | 6
-rw-r--r-- deps/v8/tools/testrunner/local/old_statusfile.py | 462
-rw-r--r-- deps/v8/tools/testrunner/local/statusfile.py | 30
-rw-r--r-- deps/v8/tools/testrunner/local/testsuite.py | 6
-rw-r--r-- deps/v8/tools/testrunner/local/utils.py | 4
-rw-r--r-- deps/v8/tools/testrunner/objects/context.py | 7
-rw-r--r-- deps/v8/tools/v8heapconst.py | 244
470 files changed, 31094 insertions(+), 26941 deletions(-)
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 282e463fc5..d554ec65d2 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -37,24 +37,16 @@ shell_g
/out
/perf.data
/perf.data.old
-/test/benchmarks/benchmarks.status2
/test/benchmarks/CHECKED_OUT_*
/test/benchmarks/downloaded_*
/test/benchmarks/kraken
/test/benchmarks/octane
/test/benchmarks/sunspider
-/test/cctest/cctest.status2
-/test/message/message.status2
-/test/mjsunit/mjsunit.status2
/test/mozilla/CHECKED_OUT_VERSION
/test/mozilla/data
/test/mozilla/downloaded_*
-/test/mozilla/mozilla.status2
-/test/preparser/preparser.status2
/test/test262/data
/test/test262/test262-*
-/test/test262/test262.status2
-/test/webkit/webkit.status2
/third_party
/tools/jsfunfuzz
/tools/jsfunfuzz.zip
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index b3eba3661a..97895d3693 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,226 @@
+2013-10-31: Version 3.22.24
+
+ Fixed uint32-to-smi conversion in Lithium.
+ (Chromium issue 309623)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-28: Version 3.22.23
+
+ Renamed deprecated __attribute__((no_address_safety_analysis)) to
+ __attribute__((no_sanitize_address)) (Chromium issue 311283)
+
+ Defined DEBUG for v8_optimized_debug=2
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-25: Version 3.22.22
+
+ Record allocation stack traces. (Chromium issue 277984,v8:2949)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-24: Version 3.22.21
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-24: Version 3.22.20
+
+ Made Array.prototype.pop throw if the last element is not configurable.
+
+ Fixed HObjectAccess for loads from migrating prototypes.
+ (Chromium issue 305309)
+
+ Enabled preaging of code objects when --optimize-for-size.
+ (Chromium issue 280984)
+
+ Exposed v8::Function::GetDisplayName to public API.
+ (Chromium issue 17356)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-23: Version 3.22.19
+
+ Fix materialization of captured objects with field tracking.
+ (Chromium issue 298990)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-22: Version 3.22.18
+
+ Add tool to visualize machine code/lithium.
+
+ Handle misaligned loads and stores in load elimination. Do not track
+ misaligned loads and be conservative about invalidating misaligned
+ stores. (issue 2934)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-21: Version 3.22.17
+
+ Harmony: Implement Math.trunc and Math.sign. (issue 2938)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-21: Version 3.22.16
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-18: Version 3.22.15
+
+ Enabled calling the SetReference* & SetObjectGroupId functions with a
+ Persistent<SubclassOfValue>.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-17: Version 3.22.14
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-16: Version 3.22.13
+
+ Do not look up ArrayBuffer on global object in typed array constructor.
+ (issue 2931)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-15: Version 3.22.12
+
+ Added histograms to track fraction of heap spaces and percentage of
+ generated crankshaft code.
+
+ Moved v8_optimized_debug default value to standalone.gypi.
+
+ Track JS allocations as they arrive with no affection on performance
+ when tracking is switched off (Chromium issue 277984).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-14: Version 3.22.11
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-11: Version 3.22.10
+
+ Fixed timezone issues with date-time/parse-* tests.
+ (Chromium issue 2919)
+
+ Added column getter to CpuProfileNode (Chromium issue 302537)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-10: Version 3.22.9
+
+ Ensure only whitelisted stubs have sse2 versions in the snapshot.
+ (fix for chromium 304565)
+
+ Implement ArrayBuffer.isView.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-04: Version 3.22.8
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-03: Version 3.22.7
+
+ Debug: Allow stepping into on a given call frame
+ (Chromium issue 296963).
+
+ Always use timeGetTime() for TimeTicks::Now() on Windows
+ (Chromium issue 288924).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-02: Version 3.22.6
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-01: Version 3.22.5
+
+ Disabled externalization of sliced/cons strings in old pointer space
+ (Chromium issue 276357).
+
+ Turned on handle zapping for release builds
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-30: Version 3.22.4
+
+ Function::Call and Object::CallAsFunction APIs should allow v8::Value as
+ a receiver (issue 2915).
+
+ Removed unnecessary mutex (Chromium issue 291236).
+
+ Removed ArrayBufferView::BaseAddress method.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-27: Version 3.22.3
+
+ Added methods to enable configuration of ResourceConstraints based on
+ limits derived at runtime.
+ (Chromium issue 292928)
+
+ Added -optimize-for-size flag to optimize for memory size (will be used
+ by pre-aging CL), and removed the is_memory_constrained
+ ResourceConstraint.
+ (Chromium issue 292928)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-26: Version 3.22.2
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-25: Version 3.22.1
+
+ Sped up creating typed arrays from array-like objects.
+ (Chromium issue 270507)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-23: Version 3.22.0
+
+ LiveEdit to mark more closure functions for re-instantiation when scope
+ layout changes.
+ (issue 2872)
+
+ Made bounds check elimination iterative instead of recursive.
+ (Chromium issue 289706)
+
+ Turned on i18n support by default.
+
+ Set the proper instance-type on HAllocate in BuildFastLiteral.
+ (Chromium issue 284577)
+
+ Performance and stability improvements on all platforms.
+
+
2013-09-18: Version 3.21.17
Implemented local load/store elimination on basic blocks.
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 288c257396..bbec44076e 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -76,10 +76,10 @@ ifeq ($(snapshot), off)
endif
# extrachecks=on/off
ifeq ($(extrachecks), on)
- GYPFLAGS += -Dv8_enable_extra_checks=1
+ GYPFLAGS += -Dv8_enable_extra_checks=1 -Dv8_enable_handle_zapping=1
endif
ifeq ($(extrachecks), off)
- GYPFLAGS += -Dv8_enable_extra_checks=0
+ GYPFLAGS += -Dv8_enable_extra_checks=0 -Dv8_enable_handle_zapping=0
endif
# gdbjit=on/off
ifeq ($(gdbjit), on)
@@ -124,10 +124,15 @@ endif
ifeq ($(regexp), interpreted)
GYPFLAGS += -Dv8_interpreted_regexp=1
endif
-# i18nsupport=on
-ifeq ($(i18nsupport), on)
- GYPFLAGS += -Dv8_enable_i18n_support=1
+# i18nsupport=off
+ifeq ($(i18nsupport), off)
+ GYPFLAGS += -Dv8_enable_i18n_support=0
+ TESTFLAGS += --noi18n
endif
+# deprecation_warnings=on
+ifeq ($(deprecationwarnings), on)
+ GYPFLAGS += -Dv8_deprecation_warnings=1
+endif
# arm specific flags.
# armv7=false/true
ifeq ($(armv7), false)
@@ -217,8 +222,8 @@ NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
- build/toolchain.gypi preparser/preparser.gyp samples/samples.gyp \
- src/d8.gyp test/cctest/cctest.gyp tools/gyp/v8.gyp
+ build/toolchain.gypi samples/samples.gyp src/d8.gyp \
+ test/cctest/cctest.gyp tools/gyp/v8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)
@@ -323,7 +328,7 @@ $(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) \
--timeout=600 \
- --command-prefix="tools/android-run.py"
+ --command-prefix="tools/android-run.py" $(TESTFLAGS)
$(addsuffix .check, $(ANDROID_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
@@ -331,7 +336,7 @@ $(addsuffix .check, $(ANDROID_ARCHES)): \
$(addsuffix .check, $(NACL_BUILDS)): $$(basename $$@)
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) \
- --timeout=600 --nopresubmit \
+ --timeout=600 --nopresubmit --noi18n \
--command-prefix="tools/nacl-run.py"
$(addsuffix .check, $(NACL_ARCHES)): \
diff --git a/deps/v8/Makefile.nacl b/deps/v8/Makefile.nacl
index 02e83ef2bc..2c79ef113e 100644
--- a/deps/v8/Makefile.nacl
+++ b/deps/v8/Makefile.nacl
@@ -74,6 +74,9 @@ endif
# For mksnapshot host generation.
GYPENV += host_os=${HOST_OS}
+# ICU doesn't support NaCl.
+GYPENV += v8_enable_i18n_support=0
+
NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_ARCHES))
.SECONDEXPANSION:
# For some reason the $$(basename $$@) expansion didn't work here...
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index 6fe40e21e3..450e9b217c 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -2,12 +2,14 @@ bmeurer@chromium.org
danno@chromium.org
dslomov@chromium.org
hpayer@chromium.org
+ishell@chromium.org
jkummerow@chromium.org
-mmassi@chromium.org
+machenbach@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
rossberg@chromium.org
svenpanne@chromium.org
+titzer@chromium.org
ulan@chromium.org
vegorov@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 819331f9e5..75e16e3bd7 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -58,6 +58,17 @@ def _CommonChecks(input_api, output_api):
return results
+def _SkipTreeCheck(input_api, output_api):
+ """Check the env var whether we want to skip tree check.
+ Only skip if src/version.cc has been updated."""
+ src_version = 'src/version.cc'
+ FilterFile = lambda file: file.LocalPath() == src_version
+ if not input_api.AffectedSourceFiles(
+ lambda file: file.LocalPath() == src_version):
+ return False
+ return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
+
+
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
@@ -69,7 +80,8 @@ def CheckChangeOnCommit(input_api, output_api):
results.extend(_CommonChecks(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
- results.extend(input_api.canned_checks.CheckTreeIsOpen(
- input_api, output_api,
- json_url='http://v8-status.appspot.com/current?format=json'))
+ if not _SkipTreeCheck(input_api, output_api):
+ results.extend(input_api.canned_checks.CheckTreeIsOpen(
+ input_api, output_api,
+ json_url='http://v8-status.appspot.com/current?format=json'))
return results
diff --git a/deps/v8/tools/status-file-converter.py b/deps/v8/WATCHLISTS
index ba063ee8c7..9c2bce9c55 100755..100644
--- a/deps/v8/tools/status-file-converter.py
+++ b/deps/v8/WATCHLISTS
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 the V8 project authors. All rights reserved.
+# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -27,13 +25,22 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Watchlist Rules
+# Refer: http://dev.chromium.org/developers/contributing-code/watchlists
-import sys
-from testrunner.local import old_statusfile
+# IMPORTANT: The regular expression filepath is tested against each path using
+# re.search, so it is not usually necessary to add .*.
-if len(sys.argv) != 2:
- print "Usage: %s foo.status" % sys.argv[0]
- print "Will read foo.status and print the converted version to stdout."
- sys.exit(1)
+{
+ 'WATCHLIST_DEFINITIONS': {
+ 'public_api': {
+ 'filepath': 'include/',
+ },
+ },
-print old_statusfile.ConvertNotation(sys.argv[1]).GetOutput()
+ 'WATCHLISTS': {
+ 'public_api': [
+ 'phajdan.jr@chromium.org',
+ ],
+ },
+}
diff --git a/deps/v8/benchmarks/deltablue.js b/deps/v8/benchmarks/deltablue.js
index 548fd96ffb..dacee3f13f 100644
--- a/deps/v8/benchmarks/deltablue.js
+++ b/deps/v8/benchmarks/deltablue.js
@@ -121,23 +121,23 @@ Strength.strongest = function (s1, s2) {
Strength.prototype.nextWeaker = function () {
switch (this.strengthValue) {
- case 0: return Strength.WEAKEST;
- case 1: return Strength.WEAK_DEFAULT;
- case 2: return Strength.NORMAL;
- case 3: return Strength.STRONG_DEFAULT;
- case 4: return Strength.PREFERRED;
- case 5: return Strength.REQUIRED;
+ case 0: return Strength.STRONG_PREFERRED;
+ case 1: return Strength.PREFERRED;
+ case 2: return Strength.STRONG_DEFAULT;
+ case 3: return Strength.NORMAL;
+ case 4: return Strength.WEAK_DEFAULT;
+ case 5: return Strength.WEAKEST;
}
}
// Strength constants.
-Strength.REQUIRED = new Strength(0, "required");
-Strength.STONG_PREFERRED = new Strength(1, "strongPreferred");
-Strength.PREFERRED = new Strength(2, "preferred");
-Strength.STRONG_DEFAULT = new Strength(3, "strongDefault");
-Strength.NORMAL = new Strength(4, "normal");
-Strength.WEAK_DEFAULT = new Strength(5, "weakDefault");
-Strength.WEAKEST = new Strength(6, "weakest");
+Strength.REQUIRED = new Strength(0, "required");
+Strength.STRONG_PREFERRED = new Strength(1, "strongPreferred");
+Strength.PREFERRED = new Strength(2, "preferred");
+Strength.STRONG_DEFAULT = new Strength(3, "strongDefault");
+Strength.NORMAL = new Strength(4, "normal");
+Strength.WEAK_DEFAULT = new Strength(5, "weakDefault");
+Strength.WEAKEST = new Strength(6, "weakest");
/* --- *
* C o n s t r a i n t
diff --git a/deps/v8/build/all.gyp b/deps/v8/build/all.gyp
index 4b2fe52989..ad71fb0e45 100644
--- a/deps/v8/build/all.gyp
+++ b/deps/v8/build/all.gyp
@@ -8,7 +8,6 @@
'target_name': 'All',
'type': 'none',
'dependencies': [
- '../preparser/preparser.gyp:*',
'../samples/samples.gyp:*',
'../src/d8.gyp:d8',
'../test/cctest/cctest.gyp:*',
diff --git a/deps/v8/build/features.gypi b/deps/v8/build/features.gypi
index 3c6d25f758..7863b1c43a 100644
--- a/deps/v8/build/features.gypi
+++ b/deps/v8/build/features.gypi
@@ -54,7 +54,10 @@
# Enable ECMAScript Internationalization API. Enabling this feature will
# add a dependency on the ICU library.
- 'v8_enable_i18n_support%': 0,
+ 'v8_enable_i18n_support%': 1,
+
+ # Enable compiler warnings when using V8_DEPRECATED apis.
+ 'v8_deprecation_warnings%': 0,
},
'target_defaults': {
'conditions': [
@@ -76,6 +79,9 @@
['v8_interpreted_regexp==1', {
'defines': ['V8_INTERPRETED_REGEXP',],
}],
+ ['v8_deprecation_warnings==1', {
+ 'defines': ['V8_DEPRECATION_WARNINGS',],
+ }],
['v8_enable_i18n_support==1', {
'defines': ['V8_I18N_SUPPORT',],
}],
@@ -89,21 +95,29 @@
'Debug': {
'variables': {
'v8_enable_extra_checks%': 1,
+ 'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
+ ['v8_enable_handle_zapping==1', {
+ 'defines': ['ENABLE_HANDLE_ZAPPING',],
+ }],
],
}, # Debug
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
+ 'v8_enable_handle_zapping%': 0,
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
+ ['v8_enable_handle_zapping==1', {
+ 'defines': ['ENABLE_HANDLE_ZAPPING',],
+ }],
], # conditions
}, # Release
}, # configurations
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index 5c017d5f50..4cb5e00bcc 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -36,7 +36,7 @@
'clang%': 0,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
- 'v8_enable_i18n_support%': 0,
+ 'v8_enable_i18n_support%': 1,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'variables': {
@@ -77,6 +77,23 @@
# as errors.
'v8_code%': 0,
+ # Speeds up Debug builds:
+ # 0 - Compiler optimizations off (debuggable) (default). This may
+ # be 5x slower than Release (or worse).
+ # 1 - Turn on compiler optimizations. This may be hard or impossible to
+ # debug. This may still be 2x slower than Release (or worse).
+ # 2 - Turn on optimizations, and also #undef DEBUG / #define NDEBUG
+ # (but leave V8_ENABLE_CHECKS and most other assertions enabled.
+ # This may cause some v8 tests to fail in the Debug configuration.
+ # This roughly matches the performance of a Release build and can
+ # be used by embedders that need to build their own code as debug
+ # but don't want or need a debug version of V8. This should produce
+ # near-release speeds.
+ 'v8_optimized_debug%': 0,
+
+ # Relative path to icu.gyp from this file.
+ 'icu_gyp_path': '../third_party/icu/icu.gyp',
+
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/build/toolchain.gypi
index c1066ebe94..de41fe0d00 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -60,20 +60,6 @@
'v8_enable_backtrace%': 0,
- # Speeds up Debug builds:
- # 0 - Compiler optimizations off (debuggable) (default). This may
- # be 5x slower than Release (or worse).
- # 1 - Turn on compiler optimizations. This may be hard or impossible to
- # debug. This may still be 2x slower than Release (or worse).
- # 2 - Turn on optimizations, and also #undef DEBUG / #define NDEBUG
- # (but leave V8_ENABLE_CHECKS and most other assertions enabled.
- # This may cause some v8 tests to fail in the Debug configuration.
- # This roughly matches the performance of a Release build and can
- # be used by embedders that need to build their own code as debug
- # but don't want or need a debug version of V8. This should produce
- # near-release speeds.
- 'v8_optimized_debug%': 0,
-
# Enable profiling support. Only required on Windows.
'v8_enable_prof%': 0,
@@ -450,6 +436,7 @@
'V8_ENABLE_CHECKS',
'OBJECT_PRINT',
'VERIFY_HEAP',
+ 'DEBUG'
],
'msvs_settings': {
'VCCLCompilerTool': {
@@ -517,15 +504,6 @@
},
},
'conditions': [
- ['v8_optimized_debug==2', {
- 'defines': [
- 'NDEBUG',
- ],
- }, {
- 'defines': [
- 'DEBUG',
- ],
- }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
@@ -567,6 +545,9 @@
'-fdata-sections',
'-ffunction-sections',
],
+ 'defines': [
+ 'OPTIMIZED_DEBUG'
+ ],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
['nacl_target_arch=="none"', {
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index 053b81d2c5..1a86a061e9 100755
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -212,9 +212,13 @@ class V8_EXPORT Debug {
// If no isolate is provided the default isolate is
// used.
+ // TODO(dcarney): remove
static void SendCommand(const uint16_t* command, int length,
ClientData* client_data = NULL,
Isolate* isolate = NULL);
+ static void SendCommand(Isolate* isolate,
+ const uint16_t* command, int length,
+ ClientData* client_data = NULL);
// Dispatch interface.
static void SetHostDispatchHandler(HostDispatchHandler handler,
diff --git a/deps/v8/include/v8-defaults.h b/deps/v8/include/v8-defaults.h
new file mode 100644
index 0000000000..381a48210d
--- /dev/null
+++ b/deps/v8/include/v8-defaults.h
@@ -0,0 +1,54 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8_DEFAULTS_H_
+#define V8_V8_DEFAULTS_H_
+
+#include "v8.h"
+
+/**
+ * Default configuration support for the V8 JavaScript engine.
+ */
+namespace v8 {
+
+/**
+ * Configures the constraints with reasonable default values based on the
+ * capabilities of the current device the VM is running on.
+ */
+bool V8_EXPORT ConfigureResourceConstraintsForCurrentPlatform(
+ ResourceConstraints* constraints);
+
+
+/**
+ * Convience function which performs SetResourceConstraints with the settings
+ * returned by ConfigureResourceConstraintsForCurrentPlatform.
+ */
+bool V8_EXPORT SetDefaultResourceConstraintsForCurrentPlatform();
+
+} // namespace v8
+
+#endif // V8_V8_DEFAULTS_H_
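(A minimal usage sketch for the two entry points declared above. The init hook name and fallback path are illustrative, and v8::SetResourceConstraints is assumed to have its 3.22-era one-argument signature.)

    #include "v8-defaults.h"

    void InitializeVM() {  // hypothetical embedder startup hook
      // Try the one-shot helper first ...
      if (!v8::SetDefaultResourceConstraintsForCurrentPlatform()) {
        // ... otherwise derive the constraints and apply them manually.
        v8::ResourceConstraints constraints;
        if (v8::ConfigureResourceConstraintsForCurrentPlatform(&constraints)) {
          v8::SetResourceConstraints(&constraints);
        }
      }
    }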
diff --git a/deps/v8/include/v8-preparser.h b/deps/v8/include/v8-preparser.h
deleted file mode 100644
index 1da77185af..0000000000
--- a/deps/v8/include/v8-preparser.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef PREPARSER_H
-#define PREPARSER_H
-
-#include "v8.h"
-#include "v8stdint.h"
-
-namespace v8 {
-
-// The result of preparsing is either a stack overflow error, or an opaque
-// blob of data that can be passed back into the parser.
-class V8_EXPORT PreParserData {
- public:
- PreParserData(size_t size, const uint8_t* data)
- : data_(data), size_(size) { }
-
- // Create a PreParserData value where stack_overflow reports true.
- static PreParserData StackOverflow() { return PreParserData(0, NULL); }
-
- // Whether the pre-parser stopped due to a stack overflow.
- // If this is the case, size() and data() should not be used.
- bool stack_overflow() { return size_ == 0u; }
-
- // The size of the data in bytes.
- size_t size() const { return size_; }
-
- // Pointer to the data.
- const uint8_t* data() const { return data_; }
-
- private:
- const uint8_t* const data_;
- const size_t size_;
-};
-
-
-// Interface for a stream of Unicode characters.
-class V8_EXPORT UnicodeInputStream { // NOLINT - V8_EXPORT is not a class name.
- public:
- virtual ~UnicodeInputStream();
-
- // Returns the next Unicode code-point in the input, or a negative value when
- // there is no more input in the stream.
- virtual int32_t Next() = 0;
-};
-
-
-// Preparse a JavaScript program. The source code is provided as a
-// UnicodeInputStream. The max_stack_size limits the amount of stack
-// space that the preparser is allowed to use. If the preparser uses
-// more stack space than the limit provided, the result's stack_overflow()
-// method will return true. Otherwise the result contains preparser
-// data that can be used by the V8 parser to speed up parsing.
-PreParserData V8_EXPORT Preparse(UnicodeInputStream* input,
- size_t max_stack_size);
-
-} // namespace v8.
-
-#endif // PREPARSER_H
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 217a938329..0882d64527 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -57,16 +57,17 @@ class V8_EXPORT CpuProfileNode {
*/
int GetLineNumber() const;
+ /**
+ * Returns 1-based number of the column where the function originates.
+ * kNoColumnNumberInfo if no column number information is available.
+ */
+ int GetColumnNumber() const;
+
/** Returns bailout reason for the function
* if the optimization was disabled for it.
*/
const char* GetBailoutReason() const;
- /** DEPRECATED. Please use GetHitCount instead.
- * Returns the count of samples where function was currently executing.
- */
- V8_DEPRECATED(double GetSelfSamplesCount() const);
-
/**
* Returns the count of samples where the function was currently executing.
*/
@@ -85,6 +86,7 @@ class V8_EXPORT CpuProfileNode {
const CpuProfileNode* GetChild(int index) const;
static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
+ static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
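(A short sketch of reading the new column data off a profile node; the wrapper function is illustrative.)

    #include <cstdio>

    void PrintSourcePosition(const v8::CpuProfileNode* node) {
      // Both getters are 1-based and fall back to the kNo*Info constants
      // when position information is unavailable.
      std::printf("%d:%d\n", node->GetLineNumber(), node->GetColumnNumber());
    }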
@@ -473,6 +475,19 @@ class V8_EXPORT HeapProfiler {
*/
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
+ /**
+ * Starts recording JS allocations immediately as they arrive and tracking of
+ * heap objects population statistics.
+ */
+ void StartRecordingHeapAllocations();
+
+ /**
+ * Stops recording JS allocations and tracking of heap objects population
+ * statistics, cleans all collected heap objects population statistics data.
+ */
+ void StopRecordingHeapAllocations();
+
+
private:
HeapProfiler();
~HeapProfiler();
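(The intended call pattern for the new pair, sketched on the assumption that the recorded allocations are consumed through the existing snapshot API; the snapshot title and function name are illustrative.)

    void ProfileAllocations(v8::Isolate* isolate) {
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
      profiler->StartRecordingHeapAllocations();
      // ... run the JavaScript workload to be observed ...
      const v8::HeapSnapshot* snapshot = profiler->TakeHeapSnapshot(
          v8::String::NewFromUtf8(isolate, "allocations"));
      profiler->StopRecordingHeapAllocations();
      (void)snapshot;  // snapshots taken while recording carry allocation data
    }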
diff --git a/deps/v8/include/v8-testing.h b/deps/v8/include/v8-testing.h
index 97b467a91b..ba4fcc44ec 100644
--- a/deps/v8/include/v8-testing.h
+++ b/deps/v8/include/v8-testing.h
@@ -68,8 +68,4 @@ class V8_EXPORT Testing {
} // namespace v8
-
-#undef V8_EXPORT
-
-
#endif // V8_V8_TEST_H_
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index de2733838f..44a74ed5fe 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -135,6 +135,7 @@ class DeclaredAccessorDescriptor;
class ObjectOperationDescriptor;
class RawOperationDescriptor;
class CallHandlerHelper;
+class EscapableHandleScope;
namespace internal {
class Arguments;
@@ -377,7 +378,6 @@ template <class T> class Local : public Handle<T> {
* The referee is kept alive by the local handle even when
* the original handle is destroyed/disposed.
*/
- V8_INLINE static Local<T> New(Handle<T> that);
V8_INLINE static Local<T> New(Isolate* isolate, Handle<T> that);
template<class M>
V8_INLINE static Local<T> New(Isolate* isolate,
@@ -401,6 +401,7 @@ template <class T> class Local : public Handle<T> {
friend class Context;
template<class F> friend class internal::CustomArguments;
friend class HandleScope;
+ friend class EscapableHandleScope;
V8_INLINE static Local<T> New(Isolate* isolate, T* that);
};
@@ -480,6 +481,22 @@ class NonCopyablePersistentTraits {
/**
+ * Helper class traits to allow copying and assignment of Persistent.
+ * This will clone the contents of storage cell, but not any of the flags, etc.
+ */
+template<class T>
+struct CopyablePersistentTraits {
+ typedef Persistent<T, CopyablePersistentTraits<T> > CopyablePersistent;
+ static const bool kResetInDestructor = true;
+ template<class S, class M>
+ static V8_INLINE void Copy(const Persistent<S, M>& source,
+ CopyablePersistent* dest) {
+ // do nothing, just allow copy
+ }
+};
+
+
+/**
* An object reference that is independent of any handle scope. Where
* a Local handle only lives as long as the HandleScope in which it was
* allocated, a Persistent handle remains valid until it is explicitly
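(A sketch of what the copyable traits enable: storing persistent handles in standard containers, which the default non-copyable traits forbid. The typedef and helper are illustrative.)

    #include <vector>

    typedef v8::Persistent<v8::Value, v8::CopyablePersistentTraits<v8::Value> >
        CopyablePersistent;

    void Retain(v8::Isolate* isolate, v8::Local<v8::Value> value,
                std::vector<CopyablePersistent>* retained) {
      // Copying clones the storage cell, so push_back is legal here.
      retained->push_back(CopyablePersistent(isolate, value));
    }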
@@ -567,9 +584,9 @@ template <class T, class M> class Persistent {
*/
template <class S, class M2>
V8_INLINE void Reset(Isolate* isolate, const Persistent<S, M2>& other);
- // TODO(dcarney): deprecate
- V8_INLINE void Dispose() { Reset(); }
- V8_DEPRECATED(V8_INLINE void Dispose(Isolate* isolate)) { Reset(); }
+
+ V8_DEPRECATED("Use Reset instead",
+ V8_INLINE void Dispose()) { Reset(); }
V8_INLINE bool IsEmpty() const { return val_ == 0; }
@@ -625,22 +642,22 @@ template <class T, class M> class Persistent {
P* parameter,
typename WeakCallbackData<S, P>::Callback callback);
- // TODO(dcarney): deprecate
template<typename S, typename P>
- V8_INLINE void MakeWeak(
- P* parameter,
- typename WeakReferenceCallbacks<S, P>::Revivable callback);
+ V8_DEPRECATED(
+ "Use SetWeak instead",
+ V8_INLINE void MakeWeak(
+ P* parameter,
+ typename WeakReferenceCallbacks<S, P>::Revivable callback));
- // TODO(dcarney): deprecate
template<typename P>
- V8_INLINE void MakeWeak(
- P* parameter,
- typename WeakReferenceCallbacks<T, P>::Revivable callback);
+ V8_DEPRECATED(
+ "Use SetWeak instead",
+ V8_INLINE void MakeWeak(
+ P* parameter,
+ typename WeakReferenceCallbacks<T, P>::Revivable callback));
V8_INLINE void ClearWeak();
- V8_DEPRECATED(V8_INLINE void ClearWeak(Isolate* isolate)) { ClearWeak(); }
-
/**
* Marks the reference to this object independent. Garbage collector is free
* to ignore any object groups containing this object. Weak callback for an
@@ -649,10 +666,6 @@ template <class T, class M> class Persistent {
*/
V8_INLINE void MarkIndependent();
- V8_DEPRECATED(V8_INLINE void MarkIndependent(Isolate* isolate)) {
- MarkIndependent();
- }
-
/**
* Marks the reference to this object partially dependent. Partially dependent
* handles only depend on other partially dependent handles and these
@@ -663,56 +676,31 @@ template <class T, class M> class Persistent {
*/
V8_INLINE void MarkPartiallyDependent();
- V8_DEPRECATED(V8_INLINE void MarkPartiallyDependent(Isolate* isolate)) {
- MarkPartiallyDependent();
- }
-
V8_INLINE bool IsIndependent() const;
- V8_DEPRECATED(V8_INLINE bool IsIndependent(Isolate* isolate) const) {
- return IsIndependent();
- }
-
/** Checks if the handle holds the only reference to an object. */
V8_INLINE bool IsNearDeath() const;
- V8_DEPRECATED(V8_INLINE bool IsNearDeath(Isolate* isolate) const) {
- return IsNearDeath();
- }
-
/** Returns true if the handle's reference is weak. */
V8_INLINE bool IsWeak() const;
- V8_DEPRECATED(V8_INLINE bool IsWeak(Isolate* isolate) const) {
- return IsWeak();
- }
-
/**
* Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface
* description in v8-profiler.h for details.
*/
V8_INLINE void SetWrapperClassId(uint16_t class_id);
- V8_DEPRECATED(
- V8_INLINE void SetWrapperClassId(Isolate * isolate, uint16_t class_id)) {
- SetWrapperClassId(class_id);
- }
-
/**
* Returns the class ID previously assigned to this handle or 0 if no class ID
* was previously assigned.
*/
V8_INLINE uint16_t WrapperClassId() const;
- V8_DEPRECATED(V8_INLINE uint16_t WrapperClassId(Isolate* isolate) const) {
- return WrapperClassId();
- }
-
- // TODO(dcarney): remove
- V8_INLINE T* ClearAndLeak();
+ V8_DEPRECATED("This will be removed",
+ V8_INLINE T* ClearAndLeak());
- // TODO(dcarney): remove
- V8_INLINE void Clear() { val_ = 0; }
+ V8_DEPRECATED("This will be removed",
+ V8_INLINE void Clear()) { val_ = 0; }
// TODO(dcarney): remove
#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
@@ -724,6 +712,7 @@ template <class T, class M> class Persistent {
V8_INLINE T* operator*() const { return val_; }
private:
+ friend class Isolate;
friend class Utils;
template<class F> friend class Handle;
template<class F> friend class Local;
@@ -757,27 +746,28 @@ class V8_EXPORT HandleScope {
~HandleScope();
- /**
- * Closes the handle scope and returns the value as a handle in the
- * previous scope, which is the new current scope after the call.
- */
- template <class T> Local<T> Close(Handle<T> value);
+ template <class T>
+ V8_DEPRECATED("Use EscapableHandleScope::Escape instead",
+ Local<T> Close(Handle<T> value));
/**
* Counts the number of allocated handles.
*/
static int NumberOfHandles();
+ private:
/**
* Creates a new handle with the given value.
*/
- static internal::Object** CreateHandle(internal::Object* value);
static internal::Object** CreateHandle(internal::Isolate* isolate,
internal::Object* value);
- // Faster version, uses HeapObject to obtain the current Isolate.
- static internal::Object** CreateHandle(internal::HeapObject* value);
+ // Uses HeapObject to obtain the current Isolate.
+ static internal::Object** CreateHandle(internal::HeapObject* heap_object,
+ internal::Object* value);
+
+ V8_INLINE HandleScope() {}
+ void Initialize(Isolate* isolate);
- private:
// Make it hard to create heap-allocated or illegal handle scopes by
// disallowing certain operations.
HandleScope(const HandleScope&);
@@ -798,19 +788,58 @@ class V8_EXPORT HandleScope {
}
};
- void Initialize(Isolate* isolate);
void Leave();
internal::Isolate* isolate_;
internal::Object** prev_next_;
internal::Object** prev_limit_;
+ // TODO(dcarney): remove this field
// Allow for the active closing of HandleScopes, which makes it possible to
// pass a handle from the HandleScope being closed to the next topmost
// HandleScope.
bool is_closed_;
internal::Object** RawClose(internal::Object** value);
friend class ImplementationUtilities;
+ friend class EscapableHandleScope;
+ template<class F> friend class Handle;
+ template<class F> friend class Local;
+ friend class Object;
+ friend class Context;
+};
+
+
+/**
+ * A HandleScope which first allocates a handle in the current scope
+ * that will later be filled with the escape value.
+ */
+class V8_EXPORT EscapableHandleScope : public HandleScope {
+ public:
+ EscapableHandleScope(Isolate* isolate);
+ V8_INLINE ~EscapableHandleScope() {}
+
+ /**
+ * Pushes the value into the previous scope and returns a handle to it.
+ * Cannot be called twice.
+ */
+ template <class T>
+ V8_INLINE Local<T> Escape(Local<T> value) {
+ internal::Object** slot =
+ Escape(reinterpret_cast<internal::Object**>(*value));
+ return Local<T>(reinterpret_cast<T*>(slot));
+ }
+
+ private:
+ internal::Object** Escape(internal::Object** escape_value);
+
+ // Make it hard to create heap-allocated or illegal handle scopes by
+ // disallowing certain operations.
+ EscapableHandleScope(const EscapableHandleScope&);
+ void operator=(const EscapableHandleScope&);
+ void* operator new(size_t size);
+ void operator delete(void*, size_t);
+
+ internal::Object** escape_slot_;
};
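The class above replaces the HandleScope::Close() pattern deprecated earlier in this hunk. A minimal migration sketch, assuming a v8::Isolate* named isolate and a function that hands a value out of its own scope:

    v8::Local<v8::Value> MakeAnswer(v8::Isolate* isolate) {
      v8::EscapableHandleScope scope(isolate);
      // Allocated in this scope; it would die when the scope unwinds.
      v8::Local<v8::Number> n = v8::Number::New(42);
      // Escape() copies the value into the slot pre-allocated in the
      // enclosing scope; per the comment above it cannot be called twice.
      return scope.Escape(n);
    }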
@@ -857,7 +886,9 @@ class V8_EXPORT ScriptData { // NOLINT
* \param input Pointer to UTF-8 script source code.
* \param length Length of UTF-8 script source code.
*/
- static ScriptData* PreCompile(const char* input, int length);
+ static ScriptData* PreCompile(Isolate* isolate,
+ const char* input,
+ int length);
/**
* Pre-compiles the specified script (context-independent).
@@ -1009,9 +1040,8 @@ class V8_EXPORT Script {
/**
* Returns the script id value.
- * DEPRECATED: Please use GetId().
*/
- Local<Value> Id();
+ V8_DEPRECATED("Use GetId instead", Local<Value> Id());
/**
* Returns the script id.
@@ -1463,6 +1493,7 @@ class V8_EXPORT Value : public Data {
/** JS == */
bool Equals(Handle<Value> that) const;
bool StrictEquals(Handle<Value> that) const;
+ bool SameValue(Handle<Value> that) const;
template <class T> V8_INLINE static Value* Cast(T* value);
@@ -1516,11 +1547,6 @@ class V8_EXPORT String : public Primitive {
int Utf8Length() const;
/**
- * This function is no longer useful.
- */
- V8_DEPRECATED(V8_INLINE bool MayContainNonAscii() const) { return true; }
-
- /**
* Returns whether this string is known to contain only one byte data.
* Does not read the string.
* False negatives are possible.
@@ -1570,11 +1596,6 @@ class V8_EXPORT String : public Primitive {
int start = 0,
int length = -1,
int options = NO_OPTIONS) const;
- // ASCII characters.
- V8_DEPRECATED(int WriteAscii(char* buffer,
- int start = 0,
- int length = -1,
- int options = NO_OPTIONS) const);
// One byte characters.
int WriteOneByte(uint8_t* buffer,
int start = 0,
@@ -1705,24 +1726,29 @@ class V8_EXPORT String : public Primitive {
V8_INLINE static String* Cast(v8::Value* obj);
- // TODO(dcarney): deprecate
/**
* Allocates a new string from either UTF-8 encoded or ASCII data.
* The second parameter 'length' gives the buffer length. If omitted,
* the function calls 'strlen' to determine the buffer length.
*/
- V8_INLINE static Local<String> New(const char* data, int length = -1);
+ V8_DEPRECATED(
+ "Use NewFromOneByte instead",
+ V8_INLINE static Local<String> New(const char* data, int length = -1));
- // TODO(dcarney): deprecate
/** Allocates a new string from 16-bit character codes.*/
- V8_INLINE static Local<String> New(const uint16_t* data, int length = -1);
+ V8_DEPRECATED(
+ "Use NewFromTwoByte instead",
+ V8_INLINE static Local<String> New(
+ const uint16_t* data, int length = -1));
- // TODO(dcarney): deprecate
/**
* Creates an internalized string (historically called a "symbol",
* not to be confused with ES6 symbols). Returns one if it exists already.
*/
- V8_INLINE static Local<String> NewSymbol(const char* data, int length = -1);
+ V8_DEPRECATED(
+ "Use NewFromUtf8 instead",
+ V8_INLINE static Local<String> NewSymbol(
+ const char* data, int length = -1));
enum NewStringType {
kNormalString, kInternalizedString, kUndetectableString
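For the three deprecations above, the replacement family takes the isolate explicitly. A sketch of the UTF-8 case, assuming isolate is the current v8::Isolate* (NewFromOneByte and NewFromTwoByte follow the same shape for Latin-1 and 16-bit data):

    // Before: v8::Local<v8::String> s = v8::String::New("hello");
    v8::Local<v8::String> s = v8::String::NewFromUtf8(isolate, "hello");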
@@ -1801,15 +1827,17 @@ class V8_EXPORT String : public Primitive {
*/
bool CanMakeExternal();
- // TODO(dcarney): deprecate
/** Creates an undetectable string from the supplied ASCII or UTF-8 data.*/
- V8_INLINE static Local<String> NewUndetectable(const char* data,
- int length = -1);
+ V8_DEPRECATED(
+ "Use NewFromUtf8 instead",
+ V8_INLINE static Local<String> NewUndetectable(const char* data,
+ int length = -1));
- // TODO(dcarney): deprecate
/** Creates an undetectable string from the supplied 16-bit character codes.*/
- V8_INLINE static Local<String> NewUndetectable(const uint16_t* data,
- int length = -1);
+ V8_DEPRECATED(
+ "Use NewFromTwoByte instead",
+ V8_INLINE static Local<String> NewUndetectable(const uint16_t* data,
+ int length = -1));
/**
* Converts an object to a UTF-8-encoded character array. Useful if
@@ -1843,8 +1871,8 @@ class V8_EXPORT String : public Primitive {
*/
class V8_EXPORT AsciiValue {
public:
- // TODO(dcarney): deprecate
- explicit AsciiValue(Handle<v8::Value> obj);
+ V8_DEPRECATED("Use Utf8Value instead",
+ explicit AsciiValue(Handle<v8::Value> obj));
~AsciiValue();
char* operator*() { return str_; }
const char* operator*() const { return str_; }
@@ -2265,7 +2293,7 @@ class V8_EXPORT Object : public Value {
* Call an Object as a function if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
*/
- Local<Value> CallAsFunction(Handle<Object> recv,
+ Local<Value> CallAsFunction(Handle<Value> recv,
int argc,
Handle<Value> argv[]);
@@ -2364,17 +2392,18 @@ class FunctionCallbackInfo {
V8_INLINE Isolate* GetIsolate() const;
V8_INLINE ReturnValue<T> GetReturnValue() const;
// This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 6;
+ static const int kArgsLength = 7;
protected:
friend class internal::FunctionCallbackArguments;
friend class internal::CustomArguments<FunctionCallbackInfo>;
- static const int kReturnValueIndex = 0;
- static const int kReturnValueDefaultValueIndex = -1;
- static const int kIsolateIndex = -2;
- static const int kDataIndex = -3;
- static const int kCalleeIndex = -4;
- static const int kHolderIndex = -5;
+ static const int kHolderIndex = 0;
+ static const int kIsolateIndex = 1;
+ static const int kReturnValueDefaultValueIndex = 2;
+ static const int kReturnValueIndex = 3;
+ static const int kDataIndex = 4;
+ static const int kCalleeIndex = 5;
+ static const int kContextSaveIndex = 6;
V8_INLINE FunctionCallbackInfo(internal::Object** implicit_args,
internal::Object** values,
@@ -2406,12 +2435,12 @@ class PropertyCallbackInfo {
friend class MacroAssembler;
friend class internal::PropertyCallbackArguments;
friend class internal::CustomArguments<PropertyCallbackInfo>;
- static const int kThisIndex = 0;
- static const int kDataIndex = -1;
- static const int kReturnValueIndex = -2;
- static const int kReturnValueDefaultValueIndex = -3;
- static const int kIsolateIndex = -4;
- static const int kHolderIndex = -5;
+ static const int kHolderIndex = 0;
+ static const int kIsolateIndex = 1;
+ static const int kReturnValueDefaultValueIndex = 2;
+ static const int kReturnValueIndex = 3;
+ static const int kDataIndex = 4;
+ static const int kThisIndex = 5;
V8_INLINE PropertyCallbackInfo(internal::Object** args) : args_(args) {}
internal::Object** args_;
@@ -2437,7 +2466,7 @@ class V8_EXPORT Function : public Object {
Local<Object> NewInstance() const;
Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
- Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
+ Local<Value> Call(Handle<Value> recv, int argc, Handle<Value> argv[]);
void SetName(Handle<String> name);
Handle<Value> GetName() const;
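Widening recv from Handle&lt;Object&gt; to Handle&lt;Value&gt; (here and in Object::CallAsFunction above) lets embedders pass primitive receivers, matching JavaScript call semantics. A sketch, assuming fn is a Local&lt;Function&gt; and isolate is the current isolate:

    // The receiver may now be any Value, e.g. undefined:
    v8::Local<v8::Value> result =
        fn->Call(v8::Undefined(isolate), 0, NULL);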
@@ -2450,6 +2479,12 @@ class V8_EXPORT Function : public Object {
Handle<Value> GetInferredName() const;
/**
+ * User-defined name assigned to the "displayName" property of this function.
+ * Used to facilitate debugging and profiling of JavaScript code.
+ */
+ Handle<Value> GetDisplayName() const;
+
+ /**
* Returns zero based line number of function body and
* kLineOffsetNotFound if no information available.
*/
@@ -2461,10 +2496,14 @@ class V8_EXPORT Function : public Object {
int GetScriptColumnNumber() const;
/**
+ * Tells whether this function is builtin.
+ */
+ bool IsBuiltin() const;
+
+ /**
* Returns scriptId object.
- * DEPRECATED: use ScriptId() instead.
*/
- Handle<Value> GetScriptId() const;
+ V8_DEPRECATED("Use ScriptId instead", Handle<Value> GetScriptId()) const;
/**
* Returns scriptId.
@@ -2627,10 +2666,6 @@ class V8_EXPORT ArrayBufferView : public Object {
* Size of a view in bytes.
*/
size_t ByteLength();
- /**
- * Base address of a view.
- */
- void* BaseAddress();
V8_INLINE static ArrayBufferView* Cast(Value* obj);
@@ -2830,9 +2865,9 @@ class V8_EXPORT Date : public Object {
public:
static Local<Value> New(double time);
- // Deprecated, use Date::ValueOf() instead.
- // TODO(svenpanne) Actually deprecate when Chrome is adapted.
- double NumberValue() const { return ValueOf(); }
+ V8_DEPRECATED(
+ "Use ValueOf instead",
+ double NumberValue()) const { return ValueOf(); }
/**
* A specialization of Value::NumberValue that is more efficient
@@ -2868,9 +2903,9 @@ class V8_EXPORT NumberObject : public Object {
public:
static Local<Value> New(double value);
- // Deprecated, use NumberObject::ValueOf() instead.
- // TODO(svenpanne) Actually deprecate when Chrome is adapted.
- double NumberValue() const { return ValueOf(); }
+ V8_DEPRECATED(
+ "Use ValueOf instead",
+ double NumberValue()) const { return ValueOf(); }
/**
* Returns the Number held by the object.
@@ -2891,9 +2926,9 @@ class V8_EXPORT BooleanObject : public Object {
public:
static Local<Value> New(bool value);
- // Deprecated, use BooleanObject::ValueOf() instead.
- // TODO(svenpanne) Actually deprecate when Chrome is adapted.
- bool BooleanValue() const { return ValueOf(); }
+ V8_DEPRECATED(
+ "Use ValueOf instead",
+ bool BooleanValue()) const { return ValueOf(); }
/**
* Returns the Boolean held by the object.
@@ -2914,9 +2949,9 @@ class V8_EXPORT StringObject : public Object {
public:
static Local<Value> New(Handle<String> value);
- // Deprecated, use StringObject::ValueOf() instead.
- // TODO(svenpanne) Actually deprecate when Chrome is adapted.
- Local<String> StringValue() const { return ValueOf(); }
+ V8_DEPRECATED(
+ "Use ValueOf instead",
+ Local<String> StringValue()) const { return ValueOf(); }
/**
* Returns the String held by the object.
@@ -2939,9 +2974,9 @@ class V8_EXPORT SymbolObject : public Object {
public:
static Local<Value> New(Isolate* isolate, Handle<Symbol> value);
- // Deprecated, use SymbolObject::ValueOf() instead.
- // TODO(svenpanne) Actually deprecate when Chrome is adapted.
- Local<Symbol> SymbolValue() const { return ValueOf(); }
+ V8_DEPRECATED(
+ "Use ValueOf instead",
+ Local<Symbol> SymbolValue()) const { return ValueOf(); }
/**
* Returns the Symbol held by the object.
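All four wrapper classes above follow the same migration: the type-specific getter is deprecated in favor of ValueOf(). A sketch, assuming val is a Local&lt;Value&gt; known to hold a Date:

    v8::Local<v8::Date> date = v8::Local<v8::Date>::Cast(val);
    double ms = date->ValueOf();  // replaces the deprecated NumberValue()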
@@ -3744,23 +3779,18 @@ class V8_EXPORT ResourceConstraints {
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
- Maybe<bool> is_memory_constrained() const { return is_memory_constrained_; }
- // If set to true, V8 will limit it's memory usage, at the potential cost of
- // lower performance. Note, this option is a tentative addition to the API
- // and may be removed or modified without warning.
- void set_memory_constrained(bool value) {
- is_memory_constrained_ = Maybe<bool>(value);
- }
private:
int max_young_space_size_;
int max_old_space_size_;
int max_executable_size_;
uint32_t* stack_limit_;
- Maybe<bool> is_memory_constrained_;
};
+/**
+ * Sets the given ResourceConstraints on the current isolate.
+ */
bool V8_EXPORT SetResourceConstraints(ResourceConstraints* constraints);
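With the tentative memory-constrained flag gone, the remaining knobs are the three sizes and the stack limit. A sketch of the intended call pattern (setter names assumed to mirror the private fields shown above):

    v8::ResourceConstraints constraints;
    constraints.set_max_old_space_size(128);  // assumed to be in MB here
    v8::SetResourceConstraints(&constraints);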
@@ -3773,13 +3803,9 @@ typedef void (*FatalErrorCallback)(const char* location, const char* message);
typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> error);
-/**
- * Schedules an exception to be thrown when returning to JavaScript. When an
- * exception has been scheduled it is illegal to invoke any JavaScript
- * operation; the caller must return immediately and only after the exception
- * has been handled does it become legal to invoke JavaScript operations.
- */
-Handle<Value> V8_EXPORT ThrowException(Handle<Value> exception);
+V8_DEPRECATED(
+ "Use Isolate::ThrowException instead",
+ Handle<Value> V8_EXPORT ThrowException(Handle<Value> exception));
/**
* Create new error objects by calling the corresponding error object
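The free function moves onto Isolate (declared further down in this diff). A migration sketch, assuming isolate is the current v8::Isolate* and using a hypothetical error message:

    // Before: v8::ThrowException(exception);
    isolate->ThrowException(
        v8::Exception::Error(v8::String::NewFromUtf8(isolate, "boom")));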
@@ -3870,8 +3896,6 @@ enum GCCallbackFlags {
typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
-typedef void (*GCCallback)();
-
/**
* Collection of V8 heap information.
@@ -4019,10 +4043,31 @@ class V8_EXPORT Isolate {
*/
CpuProfiler* GetCpuProfiler();
+ /** Returns true if this isolate has a current context. */
+ bool InContext();
+
/** Returns the context that is on the top of the stack. */
Local<Context> GetCurrentContext();
/**
+ * Returns the context of the calling JavaScript code. That is the
+ * context of the top-most JavaScript frame. If there are no
+ * JavaScript frames, an empty handle is returned.
+ */
+ Local<Context> GetCallingContext();
+
+ /** Returns the last entered context. */
+ Local<Context> GetEnteredContext();
+
+ /**
+ * Schedules an exception to be thrown when returning to JavaScript. When an
+ * exception has been scheduled it is illegal to invoke any JavaScript
+ * operation; the caller must return immediately and only after the exception
+ * has been handled does it become legal to invoke JavaScript operations.
+ */
+ Local<Value> ThrowException(Local<Value> exception);
+
+ /**
* Allows the host application to group objects together. If one
* object in the group is alive, all objects in the group are alive.
* After each garbage collection, object groups are removed. It is
@@ -4033,8 +4078,8 @@ class V8_EXPORT Isolate {
* garbage collection types it is sufficient to provide object groups
* for partially dependent handles only.
*/
- void SetObjectGroupId(const Persistent<Value>& object,
- UniqueId id);
+ template<typename T> void SetObjectGroupId(const Persistent<T>& object,
+ UniqueId id);
/**
* Allows the host application to declare implicit references from an object
@@ -4043,8 +4088,8 @@ class V8_EXPORT Isolate {
* are removed. It is intended to be used in the before-garbage-collection
* callback function.
*/
- void SetReferenceFromGroup(UniqueId id,
- const Persistent<Value>& child);
+ template<typename T> void SetReferenceFromGroup(UniqueId id,
+ const Persistent<T>& child);
/**
* Allows the host application to declare implicit references from an object
@@ -4052,8 +4097,53 @@ class V8_EXPORT Isolate {
* too. After each garbage collection, all implicit references are removed. It
* is intended to be used in the before-garbage-collection callback function.
*/
- void SetReference(const Persistent<Object>& parent,
- const Persistent<Value>& child);
+ template<typename T, typename S>
+ void SetReference(const Persistent<T>& parent, const Persistent<S>& child);
+
+ typedef void (*GCPrologueCallback)(Isolate* isolate,
+ GCType type,
+ GCCallbackFlags flags);
+ typedef void (*GCEpilogueCallback)(Isolate* isolate,
+ GCType type,
+ GCCallbackFlags flags);
+
+ /**
+ * Enables the host application to receive a notification before a
+ * garbage collection. Allocations are not allowed in the callback
+ * function; you therefore cannot manipulate objects (set or delete
+ * properties, for example) since such operations may result in the
+ * allocation of objects. It is possible to specify the GCType filter
+ * for your callback, but it is not possible to register the same
+ * callback function twice with different GCType filters.
+ */
+ void AddGCPrologueCallback(
+ GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
+
+ /**
+ * This function removes a callback that was installed by
+ * AddGCPrologueCallback.
+ */
+ void RemoveGCPrologueCallback(GCPrologueCallback callback);
+
+ /**
+ * Enables the host application to receive a notification after a
+ * garbage collection. Allocations are not allowed in the callback
+ * function; you therefore cannot manipulate objects (set or delete
+ * properties, for example) since such operations may result in the
+ * allocation of objects. It is possible to specify the GCType filter
+ * for your callback, but it is not possible to register the same
+ * callback function twice with different GCType filters.
+ */
+ void AddGCEpilogueCallback(
+ GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
+
+ /**
+ * This function removes a callback that was installed by
+ * AddGCEpilogueCallback.
+ */
+ void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
private:
Isolate();
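The per-isolate callback typedefs shadow the process-wide ones and add the Isolate* parameter. A registration sketch:

    static void OnScavenge(v8::Isolate* isolate, v8::GCType type,
                           v8::GCCallbackFlags flags) {
      // Per the comments above, no allocation may happen in here.
    }
    // ...
    isolate->AddGCPrologueCallback(OnScavenge, v8::kGCTypeScavenge);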
@@ -4062,8 +4152,11 @@ class V8_EXPORT Isolate {
Isolate& operator=(const Isolate&);
void* operator new(size_t size);
void operator delete(void*, size_t);
-};
+ void SetObjectGroupId(internal::Object** object, UniqueId id);
+ void SetReferenceFromGroup(UniqueId id, internal::Object** object);
+ void SetReference(internal::Object** parent, internal::Object** child);
+};
class V8_EXPORT StartupData {
public:
@@ -4412,16 +4505,6 @@ class V8_EXPORT V8 {
static void RemoveGCPrologueCallback(GCPrologueCallback callback);
/**
- * The function is deprecated. Please use AddGCPrologueCallback instead.
- * Enables the host application to receive a notification before a
- * garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects.
- */
- V8_DEPRECATED(static void SetGlobalGCPrologueCallback(GCCallback));
-
- /**
* Enables the host application to receive a notification after a
* garbage collection. Allocations are not allowed in the
* callback function, you therefore cannot manipulate objects (set
@@ -4441,16 +4524,6 @@ class V8_EXPORT V8 {
static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
/**
- * The function is deprecated. Please use AddGCEpilogueCallback instead.
- * Enables the host application to receive a notification after a
- * major garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects.
- */
- V8_DEPRECATED(static void SetGlobalGCEpilogueCallback(GCCallback));
-
- /**
* Enables the host application to provide a mechanism to be notified
* and perform custom logging when V8 Allocates Executable Memory.
*/
@@ -4498,11 +4571,6 @@ class V8_EXPORT V8 {
ReturnAddressLocationResolver return_address_resolver);
/**
- * Deprecated, use the variant with the Isolate parameter below instead.
- */
- V8_DEPRECATED(static bool SetFunctionEntryHook(FunctionEntryHook entry_hook));
-
- /**
* Allows the host application to provide the address of a function that's
* invoked on entry to every V8-generated function.
* Note that \p entry_hook is invoked at the very start of each
@@ -4541,10 +4609,10 @@ class V8_EXPORT V8 {
static void SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler);
- // TODO(svenpanne) Really deprecate me when Chrome is fixed.
- /** Deprecated. Use Isolate::AdjustAmountOfExternalAllocatedMemory instead. */
- static intptr_t AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes);
+ V8_DEPRECATED(
+ "Use Isolate::AdjustAmountOfExternalAllocatedMemory instead",
+ static intptr_t AdjustAmountOfExternalAllocatedMemory(
+ intptr_t change_in_bytes));
/**
* Forcefully terminate the current thread of JavaScript execution
@@ -4599,9 +4667,6 @@ class V8_EXPORT V8 {
*/
static bool Dispose();
- /** Deprecated. Use Isolate::GetHeapStatistics instead. */
- V8_DEPRECATED(static void GetHeapStatistics(HeapStatistics* heap_statistics));
-
/**
* Iterates through all external resources referenced from current isolate
* heap. GC is not invoked prior to iterating, therefore there is no
@@ -4899,25 +4964,14 @@ class V8_EXPORT Context {
Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
Handle<Value> global_object = Handle<Value>());
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(static Persistent<Context> New(
- ExtensionConfiguration* extensions = NULL,
- Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
- Handle<Value> global_object = Handle<Value>()));
+ V8_DEPRECATED("Use Isolate::GetEnteredContext instead",
+ static Local<Context> GetEntered());
- /** Returns the last entered context. */
- static Local<Context> GetEntered();
+ V8_DEPRECATED("Use Isolate::GetCurrentContext instead",
+ static Local<Context> GetCurrent());
- // TODO(svenpanne) Actually deprecate this.
- /** Deprecated. Use Isolate::GetCurrentContext instead. */
- static Local<Context> GetCurrent();
-
- /**
- * Returns the context of the calling JavaScript code. That is the
- * context of the top-most JavaScript frame. If there are no
- * JavaScript frames an empty handle is returned.
- */
- static Local<Context> GetCalling();
+ V8_DEPRECATED("Use Isolate::GetCallingContext instead",
+ static Local<Context> GetCalling());
/**
* Sets the security token for the context. To access an object in
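Taken together with the Isolate additions earlier in this diff, the three deprecated statics map one-to-one onto isolate methods. A migration sketch:

    v8::Local<v8::Context> entered = isolate->GetEnteredContext();
    v8::Local<v8::Context> current = isolate->GetCurrentContext();
    v8::Local<v8::Context> calling = isolate->GetCallingContext();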
@@ -4948,8 +5002,8 @@ class V8_EXPORT Context {
/** Returns true if the context has experienced an out of memory situation. */
bool HasOutOfMemoryException();
- /** Returns true if V8 has a current context. */
- static bool InContext();
+ V8_DEPRECATED("Use Isolate::InContext instead",
+ static bool InContext());
/** Returns an isolate associated with a current context. */
v8::Isolate* GetIsolate();
@@ -5020,8 +5074,9 @@ class V8_EXPORT Context {
explicit V8_INLINE Scope(Handle<Context> context) : context_(context) {
context_->Enter();
}
- // TODO(dcarney): deprecate
- V8_INLINE Scope(Isolate* isolate, Persistent<Context>& context) // NOLINT
+ V8_DEPRECATED(
+ "Use Handle version instead",
+ V8_INLINE Scope(Isolate* isolate, Persistent<Context>& context)) // NOLINT
: context_(Handle<Context>::New(isolate, context)) {
context_->Enter();
}
@@ -5125,9 +5180,6 @@ class V8_EXPORT Unlocker {
*/
V8_INLINE explicit Unlocker(Isolate* isolate) { Initialize(isolate); }
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(Unlocker());
-
~Unlocker();
private:
void Initialize(Isolate* isolate);
@@ -5143,9 +5195,6 @@ class V8_EXPORT Locker {
*/
V8_INLINE explicit Locker(Isolate* isolate) { Initialize(isolate); }
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(Locker());
-
~Locker();
/**
@@ -5155,12 +5204,12 @@ class V8_EXPORT Locker {
* that will switch between multiple threads that are in contention
* for the V8 lock.
*/
- static void StartPreemption(int every_n_ms);
+ static void StartPreemption(Isolate* isolate, int every_n_ms);
/**
* Stop preemption.
*/
- static void StopPreemption();
+ static void StopPreemption(Isolate* isolate);
/**
* Returns whether or not the locker for a given isolate, is locked by the
@@ -5359,7 +5408,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 131;
+ static const int kEmptyStringRootIndex = 132;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -5370,7 +5419,7 @@ class Internals {
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
- static const int kJSObjectType = 0xb1;
+ static const int kJSObjectType = 0xb2;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@@ -5378,7 +5427,7 @@ class Internals {
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
- static void CheckInitializedImpl(v8::Isolate* isolate);
+ V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
#ifdef V8_ENABLE_CHECKS
CheckInitializedImpl(isolate);
@@ -5494,19 +5543,6 @@ Local<T>::Local() : Handle<T>() { }
template <class T>
-Local<T> Local<T>::New(Handle<T> that) {
- if (that.IsEmpty()) return Local<T>();
- T* that_ptr = *that;
- internal::Object** p = reinterpret_cast<internal::Object**>(that_ptr);
- if (internal::Internals::CanCastToHeapObject(that_ptr)) {
- return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
- reinterpret_cast<internal::HeapObject*>(*p))));
- }
- return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(*p)));
-}
-
-
-template <class T>
Local<T> Local<T>::New(Isolate* isolate, Handle<T> that) {
return New(isolate, that.val_);
}
@@ -5847,7 +5883,7 @@ FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Object** implicit_args,
template<typename T>
Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
- if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
+ if (i < 0 || length_ <= i) return Local<Value>(*Undefined(GetIsolate()));
return Local<Value>(reinterpret_cast<Value*>(values_ - i));
}
@@ -5929,7 +5965,8 @@ Handle<Boolean> ScriptOrigin::ResourceIsSharedCrossOrigin() const {
Handle<Boolean> Boolean::New(bool value) {
- return value ? True() : False();
+ Isolate* isolate = Isolate::GetCurrent();
+ return value ? True(isolate) : False(isolate);
}
@@ -5941,6 +5978,7 @@ void Template::Set(const char* name, v8::Handle<Data> value) {
Local<Value> Object::GetInternalField(int index) {
#ifndef V8_ENABLE_CHECKS
typedef internal::Object O;
+ typedef internal::HeapObject HO;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
@@ -5948,7 +5986,7 @@ Local<Value> Object::GetInternalField(int index) {
if (I::GetInstanceType(obj) == I::kJSObjectType) {
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
O* value = I::ReadField<O*>(obj, offset);
- O** result = HandleScope::CreateHandle(value);
+ O** result = HandleScope::CreateHandle(reinterpret_cast<HO*>(obj), value);
return Local<Value>(reinterpret_cast<Value*>(result));
}
#endif
@@ -6397,11 +6435,41 @@ void* Isolate::GetData() {
}
+template<typename T>
+void Isolate::SetObjectGroupId(const Persistent<T>& object,
+ UniqueId id) {
+ TYPE_CHECK(Value, T);
+ SetObjectGroupId(reinterpret_cast<v8::internal::Object**>(object.val_), id);
+}
+
+
+template<typename T>
+void Isolate::SetReferenceFromGroup(UniqueId id,
+ const Persistent<T>& object) {
+ TYPE_CHECK(Value, T);
+ SetReferenceFromGroup(id,
+ reinterpret_cast<v8::internal::Object**>(object.val_));
+}
+
+
+template<typename T, typename S>
+void Isolate::SetReference(const Persistent<T>& parent,
+ const Persistent<S>& child) {
+ TYPE_CHECK(Object, T);
+ TYPE_CHECK(Value, S);
+ SetReference(reinterpret_cast<v8::internal::Object**>(parent.val_),
+ reinterpret_cast<v8::internal::Object**>(child.val_));
+}
+
+
Local<Value> Context::GetEmbedderData(int index) {
#ifndef V8_ENABLE_CHECKS
typedef internal::Object O;
+ typedef internal::HeapObject HO;
typedef internal::Internals I;
- O** result = HandleScope::CreateHandle(I::ReadEmbedderData<O*>(this, index));
+ HO* context = *reinterpret_cast<HO**>(this);
+ O** result =
+ HandleScope::CreateHandle(context, I::ReadEmbedderData<O*>(this, index));
return Local<Value>(reinterpret_cast<Value*>(result));
#else
return SlowGetEmbedderData(index);
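The TYPE_CHECKs in the templates above constrain the handle types at compile time: the child of a reference must be Value-compatible, and the parent of SetReference an Object. A usage sketch, assuming both persistents were initialized elsewhere:

    v8::Persistent<v8::Object> parent;  // assumed: set via Reset() elsewhere
    v8::Persistent<v8::Value> child;    // assumed: set via Reset() elsewhere
    isolate->SetObjectGroupId(parent, v8::UniqueId(1));
    isolate->SetReference(parent, child);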
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 6fe5c5aabc..834f9c50ec 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -245,6 +245,7 @@
// older compilers.
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (V8_GNUC_PREREQ(4, 4, 0))
# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
+# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE (V8_GNUC_PREREQ(4, 5, 0))
# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
@@ -320,12 +321,16 @@
// A macro to mark classes or functions as deprecated.
-#if !V8_DISABLE_DEPRECATIONS && V8_HAS_ATTRIBUTE_DEPRECATED
-# define V8_DEPRECATED(declarator) declarator __attribute__((deprecated))
-#elif !V8_DISABLE_DEPRECATIONS && V8_HAS_DECLSPEC_DEPRECATED
-# define V8_DEPRECATED(declarator) __declspec(deprecated) declarator
+#if defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
+# define V8_DEPRECATED(message, declarator) \
+declarator __attribute__((deprecated(message)))
+#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED
+# define V8_DEPRECATED(message, declarator) \
+declarator __attribute__((deprecated))
+#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED
+# define V8_DEPRECATED(message, declarator) __declspec(deprecated) declarator
#else
-# define V8_DEPRECATED(declarator) declarator
+# define V8_DEPRECATED(message, declarator) declarator
#endif
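Deprecations are now opt-in via V8_DEPRECATION_WARNINGS rather than opt-out via V8_DISABLE_DEPRECATIONS, and carry a message on compilers with the messaged attribute (GCC >= 4.5, per the check added above). For a hypothetical declaration:

    V8_DEPRECATED("Use Bar instead", void Foo());
    // expands (first branch) to:
    //   void Foo() __attribute__((deprecated("Use Bar instead")));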
diff --git a/deps/v8/preparser/preparser-process.cc b/deps/v8/preparser/preparser-process.cc
deleted file mode 100644
index b816744303..0000000000
--- a/deps/v8/preparser/preparser-process.cc
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "../include/v8.h"
-#include "../include/v8stdint.h"
-#include "../include/v8-preparser.h"
-
-#include "../src/preparse-data-format.h"
-
-namespace i = v8::internal;
-
-// This file is only used for testing the preparser.
-// The first argument must be the path of a JavaScript source file, or
-// the flags "-e" and the next argument is then the source of a JavaScript
-// program.
-// Optionally this can be followed by the word "throws" (case sensitive),
-// which signals that the parsing is expected to throw - the default is
-// to expect the parsing to not throw.
-// The command line can further be followed by a message text (the
-// *type* of the exception to throw), and even more optionally, the
-// start and end position reported with the exception.
-//
-// This source file is preparsed and tested against the expectations, and if
-// successful, the resulting preparser data is written to stdout.
-// Diagnostic output is output on stderr.
-// The source file must contain only ASCII characters (UTF-8 isn't supported).
-// The file is read into memory, so it should have a reasonable size.
-
-
-// Adapts an ASCII string to the UnicodeInputStream interface.
-class AsciiInputStream : public v8::UnicodeInputStream {
- public:
- AsciiInputStream(const uint8_t* buffer, size_t length)
- : buffer_(buffer),
- end_offset_(static_cast<int>(length)),
- offset_(0) { }
-
- virtual ~AsciiInputStream() { }
-
- virtual void PushBack(int32_t ch) {
- offset_--;
-#ifdef DEBUG
- if (offset_ < 0 ||
- (ch != ((offset_ >= end_offset_) ? -1 : buffer_[offset_]))) {
- fprintf(stderr, "Invalid pushback: '%c' at offset %d.", ch, offset_);
- exit(1);
- }
-#endif
- }
-
- virtual int32_t Next() {
- if (offset_ >= end_offset_) {
- offset_++; // Increment anyway to allow symmetric pushbacks.
- return -1;
- }
- uint8_t next_char = buffer_[offset_];
-#ifdef DEBUG
- if (next_char > 0x7fu) {
- fprintf(stderr, "Non-ASCII character in input: '%c'.", next_char);
- exit(1);
- }
-#endif
- offset_++;
- return static_cast<int32_t>(next_char);
- }
-
- private:
- const uint8_t* buffer_;
- const int end_offset_;
- int offset_;
-};
-
-
-bool ReadBuffer(FILE* source, void* buffer, size_t length) {
- size_t actually_read = fread(buffer, 1, length, source);
- return (actually_read == length);
-}
-
-
-bool WriteBuffer(FILE* dest, const void* buffer, size_t length) {
- size_t actually_written = fwrite(buffer, 1, length, dest);
- return (actually_written == length);
-}
-
-
-class PreparseDataInterpreter {
- public:
- PreparseDataInterpreter(const uint8_t* data, int length)
- : data_(data), length_(length), message_(NULL) { }
-
- ~PreparseDataInterpreter() {
- if (message_ != NULL) delete[] message_;
- }
-
- bool valid() {
- int header_length =
- i::PreparseDataConstants::kHeaderSize * sizeof(int); // NOLINT
- return length_ >= header_length;
- }
-
- bool throws() {
- return valid() &&
- word(i::PreparseDataConstants::kHasErrorOffset) != 0;
- }
-
- const char* message() {
- if (message_ != NULL) return message_;
- if (!throws()) return NULL;
- int text_pos = i::PreparseDataConstants::kHeaderSize +
- i::PreparseDataConstants::kMessageTextPos;
- int length = word(text_pos);
- char* buffer = new char[length + 1];
- for (int i = 1; i <= length; i++) {
- int character = word(text_pos + i);
- buffer[i - 1] = character;
- }
- buffer[length] = '\0';
- message_ = buffer;
- return buffer;
- }
-
- int beg_pos() {
- if (!throws()) return -1;
- return word(i::PreparseDataConstants::kHeaderSize +
- i::PreparseDataConstants::kMessageStartPos);
- }
-
- int end_pos() {
- if (!throws()) return -1;
- return word(i::PreparseDataConstants::kHeaderSize +
- i::PreparseDataConstants::kMessageEndPos);
- }
-
- private:
- int word(int offset) {
- const int* word_data = reinterpret_cast<const int*>(data_);
- if (word_data + offset < reinterpret_cast<const int*>(data_ + length_)) {
- return word_data[offset];
- }
- return -1;
- }
-
- const uint8_t* const data_;
- const int length_;
- const char* message_;
-};
-
-
-template <typename T>
-class ScopedPointer {
- public:
- explicit ScopedPointer() : pointer_(NULL) {}
- explicit ScopedPointer(T* pointer) : pointer_(pointer) {}
- ~ScopedPointer() { if (pointer_ != NULL) delete[] pointer_; }
- T& operator[](int index) { return pointer_[index]; }
- T* operator*() { return pointer_ ;}
- T* operator=(T* new_value) {
- if (pointer_ != NULL) delete[] pointer_;
- pointer_ = new_value;
- return new_value;
- }
- private:
- T* pointer_;
-};
-
-
-
-void fail(v8::PreParserData* data, const char* message, ...) {
- va_list args;
- va_start(args, message);
- vfprintf(stderr, message, args);
- va_end(args);
- fflush(stderr);
- if (data != NULL) {
- // Print preparser data to stdout.
- uint32_t size = static_cast<uint32_t>(data->size());
- fprintf(stderr, "LOG: data size: %u\n", size);
- if (!WriteBuffer(stdout, data->data(), size)) {
- perror("ERROR: Writing data");
- fflush(stderr);
- }
- }
- exit(EXIT_FAILURE);
-}
-
-
-bool IsFlag(const char* arg) {
- // Anything starting with '-' is considered a flag.
- // It's summarily ignored for now.
- return arg[0] == '-';
-}
-
-
-struct ExceptionExpectation {
- ExceptionExpectation()
- : throws(false), type(NULL), beg_pos(-1), end_pos(-1) { }
- bool throws;
- const char* type;
- int beg_pos;
- int end_pos;
-};
-
-
-void CheckException(v8::PreParserData* data,
- ExceptionExpectation* expects) {
- PreparseDataInterpreter reader(data->data(), static_cast<int>(data->size()));
- if (expects->throws) {
- if (!reader.throws()) {
- if (expects->type == NULL) {
- fail(data, "Didn't throw as expected\n");
- } else {
- fail(data, "Didn't throw \"%s\" as expected\n", expects->type);
- }
- }
- if (expects->type != NULL) {
- const char* actual_message = reader.message();
- if (strcmp(expects->type, actual_message)) {
- fail(data, "Wrong error message. Expected <%s>, found <%s> at %d..%d\n",
- expects->type, actual_message, reader.beg_pos(), reader.end_pos());
- }
- }
- if (expects->beg_pos >= 0) {
- if (expects->beg_pos != reader.beg_pos()) {
- fail(data, "Wrong error start position: Expected %i, found %i\n",
- expects->beg_pos, reader.beg_pos());
- }
- }
- if (expects->end_pos >= 0) {
- if (expects->end_pos != reader.end_pos()) {
- fail(data, "Wrong error end position: Expected %i, found %i\n",
- expects->end_pos, reader.end_pos());
- }
- }
- } else if (reader.throws()) {
- const char* message = reader.message();
- fail(data, "Throws unexpectedly with message: %s at location %d-%d\n",
- message, reader.beg_pos(), reader.end_pos());
- }
-}
-
-
-ExceptionExpectation ParseExpectation(int argc, const char* argv[]) {
- // Parse ["throws" [<exn-type> [<start> [<end>]]]].
- ExceptionExpectation expects;
- int arg_index = 0;
- while (argc > arg_index && strncmp("throws", argv[arg_index], 7)) {
- arg_index++;
- }
- if (argc > arg_index) {
- expects.throws = true;
- arg_index++;
- if (argc > arg_index && !IsFlag(argv[arg_index])) {
- expects.type = argv[arg_index];
- arg_index++;
- if (argc > arg_index && !IsFlag(argv[arg_index])) {
- expects.beg_pos = atoi(argv[arg_index]); // NOLINT
- arg_index++;
- if (argc > arg_index && !IsFlag(argv[arg_index])) {
- expects.end_pos = atoi(argv[arg_index]); // NOLINT
- }
- }
- }
- }
- return expects;
-}
-
-
-int main(int argc, const char* argv[]) {
- // Parse command line.
- // Format: preparser (<scriptfile> | -e "<source>")
- // ["throws" [<exn-type> [<start> [<end>]]]]
- // Any flags (except an initial -e) are ignored.
- // Flags must not separate "throws" and its arguments.
-
- // Check for mandatory filename argument.
- int arg_index = 1;
- if (argc <= arg_index) {
- fail(NULL, "ERROR: No filename on command line.\n");
- }
- const uint8_t* source = NULL;
- const char* filename = argv[arg_index];
- if (!strcmp(filename, "-e")) {
- arg_index++;
- if (argc <= arg_index) {
- fail(NULL, "ERROR: No source after -e on command line.\n");
- }
- source = reinterpret_cast<const uint8_t*>(argv[arg_index]);
- }
- // Check remainder of command line for exception expectations.
- arg_index++;
- ExceptionExpectation expects =
- ParseExpectation(argc - arg_index, argv + arg_index);
-
- v8::V8::Initialize();
-
- ScopedPointer<uint8_t> buffer;
- size_t length;
-
- if (source == NULL) {
- // Open JS file.
- FILE* input = fopen(filename, "rb");
- if (input == NULL) {
- perror("ERROR: Error opening file");
- fflush(stderr);
- return EXIT_FAILURE;
- }
- // Find length of JS file.
- if (fseek(input, 0, SEEK_END) != 0) {
- perror("ERROR: Error during seek");
- fflush(stderr);
- return EXIT_FAILURE;
- }
- length = static_cast<size_t>(ftell(input));
- rewind(input);
- // Read JS file into memory buffer.
- buffer = new uint8_t[length];
- if (!ReadBuffer(input, *buffer, length)) {
- perror("ERROR: Reading file");
- fflush(stderr);
- return EXIT_FAILURE;
- }
- fclose(input);
- source = *buffer;
- } else {
- length = strlen(reinterpret_cast<const char*>(source));
- }
-
- // Preparse input file.
- AsciiInputStream input_buffer(source, length);
- size_t kMaxStackSize = 64 * 1024 * sizeof(void*); // NOLINT
- v8::PreParserData data = v8::Preparse(&input_buffer, kMaxStackSize);
-
- // Fail if stack overflow.
- if (data.stack_overflow()) {
- fail(&data, "ERROR: Stack overflow\n");
- }
-
- // Check that the expected exception is thrown, if an exception is
- // expected.
- CheckException(&data, &expects);
-
- return EXIT_SUCCESS;
-}
diff --git a/deps/v8/preparser/preparser.gyp b/deps/v8/preparser/preparser.gyp
deleted file mode 100644
index 23cbfff644..0000000000
--- a/deps/v8/preparser/preparser.gyp
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- 'v8_code': 1,
- },
- 'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
- 'targets': [
- {
- 'target_name': 'preparser',
- 'type': 'executable',
- 'conditions': [
- # preparser can't link against a shared library, so link against
- # the underlying static targets.
- ['v8_use_snapshot=="true"', {
- 'dependencies': ['../tools/gyp/v8.gyp:v8_snapshot'],
- }, {
- 'dependencies': [
- '../tools/gyp/v8.gyp:v8_nosnapshot.<(v8_target_arch)',
- ],
- }],
- ],
- 'include_dirs+': [
- '../src',
- ],
- 'sources': [
- 'preparser-process.cc',
- '../include/v8-preparser.h',
- '../src/preparser-api.cc',
- ],
- },
- ],
-}
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
index 42048202fd..5068c885e9 100644
--- a/deps/v8/samples/lineprocessor.cc
+++ b/deps/v8/samples/lineprocessor.cc
@@ -259,7 +259,7 @@ int RunMain(int argc, char* argv[]) {
if (cycle_type == CycleInCpp) {
bool res = RunCppCycle(script,
- v8::Context::GetCurrent(),
+ isolate->GetCurrentContext(),
report_exceptions);
return !res;
} else {
@@ -296,7 +296,7 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::String> input_line = ReadLine();
- if (input_line == v8::Undefined()) {
+ if (input_line == v8::Undefined(isolate)) {
continue;
}
@@ -306,7 +306,7 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Handle<v8::Value> result;
{
v8::TryCatch try_catch;
- result = process_fun->Call(v8::Context::GetCurrent()->Global(),
+ result = process_fun->Call(isolate->GetCurrentContext()->Global(),
argc, argv);
if (try_catch.HasCaught()) {
if (report_exceptions)
@@ -417,7 +417,7 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
// function is called. Reads a string from standard input and returns.
void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() > 0) {
- v8::ThrowException(v8::String::New("Unexpected arguments"));
+ args.GetIsolate()->ThrowException(v8::String::New("Unexpected arguments"));
return;
}
args.GetReturnValue().Set(ReadLine());
@@ -436,7 +436,7 @@ v8::Handle<v8::String> ReadLine() {
res = fgets(buffer, kBufferSize, stdin);
}
if (res == NULL) {
- v8::Handle<v8::Primitive> t = v8::Undefined();
+ v8::Handle<v8::Primitive> t = v8::Undefined(v8::Isolate::GetCurrent());
return v8::Handle<v8::String>::Cast(t);
}
// Remove newline char
diff --git a/deps/v8/samples/samples.gyp b/deps/v8/samples/samples.gyp
index be7b9ea696..dfc7410070 100644
--- a/deps/v8/samples/samples.gyp
+++ b/deps/v8/samples/samples.gyp
@@ -28,7 +28,7 @@
{
'variables': {
'v8_code': 1,
- 'v8_enable_i18n_support%': 0,
+ 'v8_enable_i18n_support%': 1,
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'target_defaults': {
@@ -42,13 +42,13 @@
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
- '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+ '<(icu_gyp_path):icudata',
],
}],
],
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 710547c341..06bd8f67eb 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -140,17 +140,20 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
// the argument into a JavaScript string.
void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- v8::ThrowException(v8::String::New("Bad parameters"));
+ args.GetIsolate()->ThrowException(
+ v8::String::New("Bad parameters"));
return;
}
v8::String::Utf8Value file(args[0]);
if (*file == NULL) {
- v8::ThrowException(v8::String::New("Error loading file"));
+ args.GetIsolate()->ThrowException(
+ v8::String::New("Error loading file"));
return;
}
v8::Handle<v8::String> source = ReadFile(*file);
if (source.IsEmpty()) {
- v8::ThrowException(v8::String::New("Error loading file"));
+ args.GetIsolate()->ThrowException(
+ v8::String::New("Error loading file"));
return;
}
args.GetReturnValue().Set(source);
@@ -165,12 +168,14 @@ void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope handle_scope(args.GetIsolate());
v8::String::Utf8Value file(args[i]);
if (*file == NULL) {
- v8::ThrowException(v8::String::New("Error loading file"));
+ args.GetIsolate()->ThrowException(
+ v8::String::New("Error loading file"));
return;
}
v8::Handle<v8::String> source = ReadFile(*file);
if (source.IsEmpty()) {
- v8::ThrowException(v8::String::New("Error loading file"));
+ args.GetIsolate()->ThrowException(
+ v8::String::New("Error loading file"));
return;
}
if (!ExecuteString(args.GetIsolate(),
@@ -178,7 +183,8 @@ void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::String::New(*file),
false,
false)) {
- v8::ThrowException(v8::String::New("Error executing file"));
+ args.GetIsolate()->ThrowException(
+ v8::String::New("Error executing file"));
return;
}
}
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
new file mode 100644
index 0000000000..f38fecad4e
--- /dev/null
+++ b/deps/v8/src/OWNERS
@@ -0,0 +1,2 @@
+per-file i18n.*=cira@chromium.org
+per-file i18n.*=mnita@google.com
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 669c02baf3..50232661c1 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -78,6 +78,61 @@ MaybeObject* Accessors::ReadOnlySetAccessor(Isolate* isolate,
}
+static V8_INLINE bool CheckForName(Handle<String> name,
+ String* property_name,
+ int offset,
+ int* object_offset) {
+ if (name->Equals(property_name)) {
+ *object_offset = offset;
+ return true;
+ }
+ return false;
+}
+
+
+bool Accessors::IsJSObjectFieldAccessor(
+ Handle<Map> map, Handle<String> name,
+ int* object_offset) {
+ Isolate* isolate = map->GetIsolate();
+ switch (map->instance_type()) {
+ case JS_ARRAY_TYPE:
+ return
+ CheckForName(name, isolate->heap()->length_string(),
+ JSArray::kLengthOffset, object_offset);
+ case JS_TYPED_ARRAY_TYPE:
+ return
+ CheckForName(name, isolate->heap()->length_string(),
+ JSTypedArray::kLengthOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->byte_length_string(),
+ JSTypedArray::kByteLengthOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->byte_offset_string(),
+ JSTypedArray::kByteOffsetOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->buffer_string(),
+ JSTypedArray::kBufferOffset, object_offset);
+ case JS_ARRAY_BUFFER_TYPE:
+ return
+ CheckForName(name, isolate->heap()->byte_length_string(),
+ JSArrayBuffer::kByteLengthOffset, object_offset);
+ case JS_DATA_VIEW_TYPE:
+ return
+ CheckForName(name, isolate->heap()->byte_length_string(),
+ JSDataView::kByteLengthOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->byte_offset_string(),
+ JSDataView::kByteOffsetOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->buffer_string(),
+ JSDataView::kBufferOffset, object_offset);
+ default: {
+ if (map->instance_type() < FIRST_NONSTRING_TYPE) {
+ return
+ CheckForName(name, isolate->heap()->length_string(),
+ String::kLengthOffset, object_offset);
+ }
+ return false;
+ }
+ }
+}
+
+
//
// Accessors::ArrayLength
//
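A usage sketch for the new internal helper, assuming map and name are handles already in scope:

    int object_offset;
    if (Accessors::IsJSObjectFieldAccessor(map, name, &object_offset)) {
      // The named property is a plain field at object_offset and can be
      // loaded directly instead of going through an accessor call.
    }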
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index d9a2130f61..b2dee27932 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -86,6 +86,13 @@ class Accessors : public AllStatic {
static Handle<AccessorInfo> MakeModuleExport(
Handle<String> name, int index, PropertyAttributes attributes);
+ // Returns true for properties that are accessors to object fields.
+ // If true, *object_offset contains the offset of the object field.
+ static bool IsJSObjectFieldAccessor(
+ Handle<Map> map, Handle<String> name,
+ int* object_offset);
+
+
private:
// Accessor functions only used through the descriptor.
static MaybeObject* FunctionSetPrototype(Isolate* isolate,
diff --git a/deps/v8/src/allocation-site-scopes.cc b/deps/v8/src/allocation-site-scopes.cc
new file mode 100644
index 0000000000..8097045b27
--- /dev/null
+++ b/deps/v8/src/allocation-site-scopes.cc
@@ -0,0 +1,108 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "allocation-site-scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
+ Handle<AllocationSite> scope_site;
+ if (top().is_null()) {
+ // We are creating the top level AllocationSite as opposed to a nested
+ // AllocationSite.
+ InitializeTraversal(isolate()->factory()->NewAllocationSite());
+ scope_site = Handle<AllocationSite>(*top(), isolate());
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating top level AllocationSite %p\n",
+ static_cast<void*>(*scope_site));
+ }
+ } else {
+ ASSERT(!current().is_null());
+ scope_site = isolate()->factory()->NewAllocationSite();
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n",
+ static_cast<void*>(*top()),
+ static_cast<void*>(*current()),
+ static_cast<void*>(*scope_site));
+ }
+ current()->set_nested_site(*scope_site);
+ update_current_site(*scope_site);
+ }
+ ASSERT(!scope_site.is_null());
+ return scope_site;
+}
+
+
+void AllocationSiteCreationContext::ExitScope(
+ Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) {
+ if (!object.is_null() && !object->IsFailure()) {
+ bool top_level = !scope_site.is_null() &&
+ top().is_identical_to(scope_site);
+
+ scope_site->set_transition_info(*object);
+ if (FLAG_trace_creation_allocation_sites) {
+ if (top_level) {
+ PrintF("*** Setting AllocationSite %p transition_info %p\n",
+ static_cast<void*>(*scope_site),
+ static_cast<void*>(*object));
+ } else {
+ PrintF("Setting AllocationSite (%p, %p) transition_info %p\n",
+ static_cast<void*>(*top()),
+ static_cast<void*>(*scope_site),
+ static_cast<void*>(*object));
+ }
+ }
+ }
+}
+
+
+Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() {
+ if (top().is_null()) {
+ InitializeTraversal(top_site_);
+ } else {
+ // Advance current site
+ Object* nested_site = current()->nested_site();
+ // Something is wrong if we advance to the end of the list here.
+ ASSERT(nested_site->IsAllocationSite());
+ update_current_site(AllocationSite::cast(nested_site));
+ }
+ return Handle<AllocationSite>(*current(), isolate());
+}
+
+
+void AllocationSiteUsageContext::ExitScope(
+ Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) {
+ // This assert ensures that we are pointing at the right sub-object in a
+ // recursive walk of a nested literal.
+ ASSERT(object.is_null() || *object == scope_site->transition_info());
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h
new file mode 100644
index 0000000000..1c3afdf369
--- /dev/null
+++ b/deps/v8/src/allocation-site-scopes.h
@@ -0,0 +1,115 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ALLOCATION_SITE_SCOPES_H_
+#define V8_ALLOCATION_SITE_SCOPES_H_
+
+#include "ast.h"
+#include "handles.h"
+#include "objects.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+
+// AllocationSiteContext is the base class for walking and copying a nested
+// boilerplate with AllocationSite and AllocationMemento support.
+class AllocationSiteContext {
+ public:
+ AllocationSiteContext(Isolate* isolate, bool activated) {
+ isolate_ = isolate;
+ activated_ = activated;
+  }
+ virtual ~AllocationSiteContext() {}
+
+ Handle<AllocationSite> top() { return top_; }
+ Handle<AllocationSite> current() { return current_; }
+
+ // If activated, then recursively create mementos
+ bool activated() const { return activated_; }
+
+ // Returns the AllocationSite that matches this scope.
+ virtual Handle<AllocationSite> EnterNewScope() = 0;
+
+ // scope_site should be the handle returned by the matching EnterNewScope()
+ virtual void ExitScope(Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) = 0;
+
+ protected:
+ void update_current_site(AllocationSite* site) {
+ *(current_.location()) = site;
+ }
+
+ Isolate* isolate() { return isolate_; }
+ void InitializeTraversal(Handle<AllocationSite> site) {
+ top_ = site;
+ current_ = Handle<AllocationSite>(*top_, isolate());
+ }
+
+ private:
+ Isolate* isolate_;
+ Handle<AllocationSite> top_;
+ Handle<AllocationSite> current_;
+ bool activated_;
+};
+
+
+// AllocationSiteCreationContext aids in the creation of AllocationSites to
+// accompany object literals.
+class AllocationSiteCreationContext : public AllocationSiteContext {
+ public:
+ explicit AllocationSiteCreationContext(Isolate* isolate)
+ : AllocationSiteContext(isolate, true) { }
+
+ virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
+ virtual void ExitScope(Handle<AllocationSite> site,
+ Handle<JSObject> object) V8_OVERRIDE;
+};
+
+
+// AllocationSiteUsageContext aids in the creation of AllocationMementos placed
+// behind some/all components of a copied object literal.
+class AllocationSiteUsageContext : public AllocationSiteContext {
+ public:
+ AllocationSiteUsageContext(Isolate* isolate, Handle<AllocationSite> site,
+ bool activated)
+ : AllocationSiteContext(isolate, activated),
+ top_site_(site) { }
+
+ virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE;
+ virtual void ExitScope(Handle<AllocationSite> site,
+ Handle<JSObject> object) V8_OVERRIDE;
+
+ private:
+ Handle<AllocationSite> top_site_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ALLOCATION_SITE_SCOPES_H_
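For context, the EnterNewScope()/ExitScope() contract declared above is easiest to see in a sketch of a hypothetical literal walker; WalkBoilerplate is an illustrative name, not a V8 function:

  // Hypothetical driver for an AllocationSiteContext; illustrative only.
  void WalkBoilerplate(AllocationSiteContext* context,
                       Handle<JSObject> boilerplate) {
    Handle<AllocationSite> site = context->EnterNewScope();
    // Copy or instantiate `boilerplate`; for every nested object literal
    // encountered, recurse so that each level performs its own matching
    // EnterNewScope()/ExitScope() pair in depth-first order, mirroring the
    // nested_site chain built by the creation context above.
    context->ExitScope(site, boilerplate);
  }

The creation context threads new sites onto the nested_site chain, and the usage context replays exactly the same depth-first order, which is why its ExitScope() can assert that the object matches the site's transition_info.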
diff --git a/deps/v8/src/allocation-tracker.cc b/deps/v8/src/allocation-tracker.cc
new file mode 100644
index 0000000000..586ce3c45a
--- /dev/null
+++ b/deps/v8/src/allocation-tracker.cc
@@ -0,0 +1,279 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "allocation-tracker.h"
+
+#include "heap-snapshot-generator.h"
+#include "frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+AllocationTraceNode::AllocationTraceNode(
+ AllocationTraceTree* tree, SnapshotObjectId shared_function_info_id)
+ : tree_(tree),
+ function_id_(shared_function_info_id),
+ total_size_(0),
+ allocation_count_(0),
+ id_(tree->next_node_id()) {
+}
+
+
+AllocationTraceNode::~AllocationTraceNode() {
+}
+
+
+AllocationTraceNode* AllocationTraceNode::FindChild(SnapshotObjectId id) {
+ for (int i = 0; i < children_.length(); i++) {
+ AllocationTraceNode* node = children_[i];
+ if (node->function_id() == id) return node;
+ }
+ return NULL;
+}
+
+
+AllocationTraceNode* AllocationTraceNode::FindOrAddChild(SnapshotObjectId id) {
+ AllocationTraceNode* child = FindChild(id);
+ if (child == NULL) {
+ child = new AllocationTraceNode(tree_, id);
+ children_.Add(child);
+ }
+ return child;
+}
+
+
+void AllocationTraceNode::AddAllocation(unsigned size) {
+ total_size_ += size;
+ ++allocation_count_;
+}
+
+
+void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
+ OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
+ if (tracker != NULL) {
+ const char* name = "<unknown function>";
+ if (function_id_ != 0) {
+ AllocationTracker::FunctionInfo* info =
+ tracker->GetFunctionInfo(function_id_);
+ if (info != NULL) {
+ name = info->name;
+ }
+ }
+ OS::Print("%s #%u", name, id_);
+ } else {
+ OS::Print("%u #%u", function_id_, id_);
+ }
+ OS::Print("\n");
+ indent += 2;
+ for (int i = 0; i < children_.length(); i++) {
+ children_[i]->Print(indent, tracker);
+ }
+}
+
+
+AllocationTraceTree::AllocationTraceTree()
+ : next_node_id_(1),
+ root_(this, 0) {
+}
+
+
+AllocationTraceTree::~AllocationTraceTree() {
+}
+
+
+AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
+ const Vector<SnapshotObjectId>& path) {
+ AllocationTraceNode* node = root();
+ for (SnapshotObjectId* entry = path.start() + path.length() - 1;
+ entry != path.start() - 1;
+ --entry) {
+ node = node->FindOrAddChild(*entry);
+ }
+ return node;
+}
+
+
+void AllocationTraceTree::Print(AllocationTracker* tracker) {
+ OS::Print("[AllocationTraceTree:]\n");
+ OS::Print("Total size | Allocation count | Function id | id\n");
+ root()->Print(0, tracker);
+}
+
+
+void AllocationTracker::DeleteUnresolvedLocation(
+ UnresolvedLocation** location) {
+ delete *location;
+}
+
+
+AllocationTracker::FunctionInfo::FunctionInfo()
+ : name(""),
+ script_name(""),
+ script_id(0),
+ line(-1),
+ column(-1) {
+}
+
+
+static bool AddressesMatch(void* key1, void* key2) {
+ return key1 == key2;
+}
+
+
+AllocationTracker::AllocationTracker(
+ HeapObjectsMap* ids, StringsStorage* names)
+ : ids_(ids),
+ names_(names),
+ id_to_function_info_(AddressesMatch) {
+}
+
+
+AllocationTracker::~AllocationTracker() {
+ unresolved_locations_.Iterate(DeleteUnresolvedLocation);
+}
+
+
+void AllocationTracker::PrepareForSerialization() {
+ List<UnresolvedLocation*> copy(unresolved_locations_.length());
+ copy.AddAll(unresolved_locations_);
+ unresolved_locations_.Clear();
+ for (int i = 0; i < copy.length(); i++) {
+ copy[i]->Resolve();
+ delete copy[i];
+ }
+}
+
+
+void AllocationTracker::NewObjectEvent(Address addr, int size) {
+ DisallowHeapAllocation no_allocation;
+ Heap* heap = ids_->heap();
+
+ // Mark the new block as FreeSpace to make sure the heap is iterable
+ // while we are capturing the stack trace.
+ FreeListNode::FromAddress(addr)->set_size(heap, size);
+ ASSERT_EQ(HeapObject::FromAddress(addr)->Size(), size);
+ ASSERT(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
+
+ Isolate* isolate = heap->isolate();
+ int length = 0;
+ StackTraceFrameIterator it(isolate);
+ while (!it.done() && length < kMaxAllocationTraceLength) {
+ JavaScriptFrame* frame = it.frame();
+ SharedFunctionInfo* shared = frame->function()->shared();
+ SnapshotObjectId id = ids_->FindEntry(shared->address());
+ allocation_trace_buffer_[length++] = id;
+ AddFunctionInfo(shared, id);
+ it.Advance();
+ }
+ AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd(
+ Vector<SnapshotObjectId>(allocation_trace_buffer_, length));
+ top_node->AddAllocation(size);
+}
+
+
+static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
+ return ComputeIntegerHash(static_cast<uint32_t>(id),
+ v8::internal::kZeroHashSeed);
+}
+
+
+AllocationTracker::FunctionInfo* AllocationTracker::GetFunctionInfo(
+ SnapshotObjectId id) {
+ HashMap::Entry* entry = id_to_function_info_.Lookup(
+ reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), false);
+ if (entry == NULL) {
+ return NULL;
+ }
+ return reinterpret_cast<FunctionInfo*>(entry->value);
+}
+
+
+void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
+ SnapshotObjectId id) {
+ HashMap::Entry* entry = id_to_function_info_.Lookup(
+ reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true);
+ if (entry->value == NULL) {
+ FunctionInfo* info = new FunctionInfo();
+ info->name = names_->GetFunctionName(shared->DebugName());
+ if (shared->script()->IsScript()) {
+ Script* script = Script::cast(shared->script());
+ if (script->name()->IsName()) {
+ Name* name = Name::cast(script->name());
+ info->script_name = names_->GetName(name);
+ }
+ info->script_id = script->id()->value();
+ // Converting start offset into line and column may cause heap
+ // allocations, so we postpone them until snapshot serialization.
+ unresolved_locations_.Add(new UnresolvedLocation(
+ script,
+ shared->start_position(),
+ info));
+ }
+ entry->value = info;
+ }
+}
+
+
+AllocationTracker::UnresolvedLocation::UnresolvedLocation(
+ Script* script, int start, FunctionInfo* info)
+ : start_position_(start),
+ info_(info) {
+ script_ = Handle<Script>::cast(
+ script->GetIsolate()->global_handles()->Create(script));
+ GlobalHandles::MakeWeak(
+ reinterpret_cast<Object**>(script_.location()),
+ this, &HandleWeakScript);
+}
+
+
+AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
+ if (!script_.is_null()) {
+ script_->GetIsolate()->global_handles()->Destroy(
+ reinterpret_cast<Object**>(script_.location()));
+ }
+}
+
+
+void AllocationTracker::UnresolvedLocation::Resolve() {
+ if (script_.is_null()) return;
+ info_->line = GetScriptLineNumber(script_, start_position_);
+ info_->column = GetScriptColumnNumber(script_, start_position_);
+}
+
+
+void AllocationTracker::UnresolvedLocation::HandleWeakScript(
+ v8::Isolate* isolate,
+ v8::Persistent<v8::Value>* obj,
+ void* data) {
+ UnresolvedLocation* location = reinterpret_cast<UnresolvedLocation*>(data);
+ location->script_ = Handle<Script>::null();
+ obj->Dispose();
+}
+
+
+} } // namespace v8::internal
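For context, AddPathFromEnd() above builds a trie of call paths keyed by function id, inserting from the innermost frame outward so that call paths sharing outer frames share a branch. A self-contained sketch of the same idea with standard containers (Node is a stand-in for AllocationTraceNode, not V8 code):

  #include <cstdint>
  #include <map>
  #include <memory>
  #include <vector>

  // Minimal stand-in for AllocationTraceNode; illustrative only.
  struct Node {
    uint64_t function_id = 0;
    unsigned total_size = 0;
    unsigned allocation_count = 0;
    std::map<uint64_t, std::unique_ptr<Node>> children;

    Node* FindOrAddChild(uint64_t id) {
      std::unique_ptr<Node>& child = children[id];
      if (!child) {
        child.reset(new Node());
        child->function_id = id;
      }
      return child.get();
    }
  };

  // The trace buffer stores the innermost frame first, so insertion walks
  // the path backwards; the outermost frame ends up nearest the root.
  Node* AddPathFromEnd(Node* root, const std::vector<uint64_t>& path) {
    Node* node = root;
    for (auto it = path.rbegin(); it != path.rend(); ++it) {
      node = node->FindOrAddChild(*it);
    }
    return node;  // the caller records size and count at this leaf
  }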
diff --git a/deps/v8/src/allocation-tracker.h b/deps/v8/src/allocation-tracker.h
new file mode 100644
index 0000000000..617cf902e8
--- /dev/null
+++ b/deps/v8/src/allocation-tracker.h
@@ -0,0 +1,138 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ALLOCATION_TRACKER_H_
+#define V8_ALLOCATION_TRACKER_H_
+
+namespace v8 {
+namespace internal {
+
+class HeapObjectsMap;
+
+class AllocationTraceTree;
+
+class AllocationTraceNode {
+ public:
+ AllocationTraceNode(AllocationTraceTree* tree,
+ SnapshotObjectId shared_function_info_id);
+ ~AllocationTraceNode();
+ AllocationTraceNode* FindChild(SnapshotObjectId shared_function_info_id);
+ AllocationTraceNode* FindOrAddChild(SnapshotObjectId shared_function_info_id);
+ void AddAllocation(unsigned size);
+
+ SnapshotObjectId function_id() const { return function_id_; }
+ unsigned allocation_size() const { return total_size_; }
+ unsigned allocation_count() const { return allocation_count_; }
+ unsigned id() const { return id_; }
+ Vector<AllocationTraceNode*> children() const { return children_.ToVector(); }
+
+ void Print(int indent, AllocationTracker* tracker);
+
+ private:
+ AllocationTraceTree* tree_;
+ SnapshotObjectId function_id_;
+ unsigned total_size_;
+ unsigned allocation_count_;
+ unsigned id_;
+ List<AllocationTraceNode*> children_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationTraceNode);
+};
+
+
+class AllocationTraceTree {
+ public:
+ AllocationTraceTree();
+ ~AllocationTraceTree();
+ AllocationTraceNode* AddPathFromEnd(const Vector<SnapshotObjectId>& path);
+ AllocationTraceNode* root() { return &root_; }
+ unsigned next_node_id() { return next_node_id_++; }
+ void Print(AllocationTracker* tracker);
+
+ private:
+ unsigned next_node_id_;
+ AllocationTraceNode root_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationTraceTree);
+};
+
+
+class AllocationTracker {
+ public:
+ struct FunctionInfo {
+ FunctionInfo();
+ const char* name;
+ const char* script_name;
+ int script_id;
+ int line;
+ int column;
+ };
+
+ AllocationTracker(HeapObjectsMap* ids, StringsStorage* names);
+ ~AllocationTracker();
+
+ void PrepareForSerialization();
+ void NewObjectEvent(Address addr, int size);
+
+ AllocationTraceTree* trace_tree() { return &trace_tree_; }
+ HashMap* id_to_function_info() { return &id_to_function_info_; }
+ FunctionInfo* GetFunctionInfo(SnapshotObjectId id);
+
+ private:
+ void AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
+
+ class UnresolvedLocation {
+ public:
+ UnresolvedLocation(Script* script, int start, FunctionInfo* info);
+ ~UnresolvedLocation();
+ void Resolve();
+
+ private:
+ static void HandleWeakScript(v8::Isolate* isolate,
+ v8::Persistent<v8::Value>* obj,
+ void* data);
+ Handle<Script> script_;
+ int start_position_;
+ FunctionInfo* info_;
+ };
+ static void DeleteUnresolvedLocation(UnresolvedLocation** location);
+
+ static const int kMaxAllocationTraceLength = 64;
+ HeapObjectsMap* ids_;
+ StringsStorage* names_;
+ AllocationTraceTree trace_tree_;
+ SnapshotObjectId allocation_trace_buffer_[kMaxAllocationTraceLength];
+ HashMap id_to_function_info_;
+ List<UnresolvedLocation*> unresolved_locations_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationTracker);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ALLOCATION_TRACKER_H_
+
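For context, the UnresolvedLocation machinery declared above exists because allocation events must not themselves allocate on the V8 heap: only the character offset is captured eagerly, and the (allocating) line/column lookup is deferred until PrepareForSerialization(). A standalone sketch of that deferral pattern; the names and the placeholder computation are illustrative, not the real script lookup:

  #include <vector>

  struct FunctionInfo { int line = -1; int column = -1; };

  // Stand-in for UnresolvedLocation; illustrative only.
  struct PendingLocation {
    int start_position;
    FunctionInfo* info;
    void Resolve() {
      // The real code calls GetScriptLineNumber/GetScriptColumnNumber here;
      // this placeholder just marks where the deferred work happens.
      info->line = 0;
      info->column = start_position;
    }
  };

  void ResolveAll(std::vector<PendingLocation*>* pending) {
    for (size_t i = 0; i < pending->size(); ++i) {
      (*pending)[i]->Resolve();   // safe to allocate at serialization time
      delete (*pending)[i];
    }
    pending->clear();
  }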
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 71a8f4a6cf..8a73877eed 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -77,8 +77,7 @@
namespace v8 {
#define ON_BAILOUT(isolate, location, code) \
- if (IsDeadCheck(isolate, location) || \
- IsExecutionTerminatingCheck(isolate)) { \
+ if (IsExecutionTerminatingCheck(isolate)) { \
code; \
UNREACHABLE(); \
}
@@ -253,13 +252,6 @@ static inline bool ApiCheck(bool condition,
}
-static bool ReportV8Dead(const char* location) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, "V8 is no longer usable");
- return true;
-}
-
-
static bool ReportEmptyHandle(const char* location) {
FatalErrorCallback callback = GetFatalErrorHandler();
callback(location, "Reading from empty handle");
@@ -267,24 +259,6 @@ static bool ReportEmptyHandle(const char* location) {
}
-/**
- * IsDeadCheck checks that the vm is usable. If, for instance, the vm has been
- * out of memory at some point this check will fail. It should be called on
- * entry to all methods that touch anything in the heap, except destructors
- * which you sometimes can't avoid calling after the vm has crashed. Functions
- * that call EnsureInitialized or ON_BAILOUT don't have to also call
- * IsDeadCheck. ON_BAILOUT has the advantage over EnsureInitialized that you
- * can arrange to return if the VM is dead. This is needed to ensure that no VM
- * heap allocations are attempted on a dead VM. EnsureInitialized has the
- * advantage over ON_BAILOUT that it actually initializes the VM if this has not
- * yet been done.
- */
-static inline bool IsDeadCheck(i::Isolate* isolate, const char* location) {
- return !isolate->IsInitialized()
- && isolate->IsDead() ? ReportV8Dead(location) : false;
-}
-
-
static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
if (!isolate->IsInitialized()) return false;
if (isolate->has_scheduled_exception()) {
@@ -321,7 +295,6 @@ static bool InitializeHelper(i::Isolate* isolate) {
static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
const char* location) {
- if (IsDeadCheck(isolate, location)) return false;
if (isolate != NULL) {
if (isolate->IsInitialized()) return true;
}
@@ -500,19 +473,7 @@ void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::ThrowException()")) {
- return v8::Handle<Value>();
- }
- ENTER_V8(isolate);
- // If we're passed an empty handle, we throw an undefined exception
- // to deal more gracefully with out of memory situations.
- if (value.IsEmpty()) {
- isolate->ScheduleThrow(isolate->heap()->undefined_value());
- } else {
- isolate->ScheduleThrow(*Utils::OpenHandle(*value));
- }
- return v8::Undefined();
+ return v8::Isolate::GetCurrent()->ThrowException(value);
}
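With this change the free function is a thin wrapper around the per-isolate API. A caller migrating off the global would write something like the following sketch, assuming this release's String::NewFromUtf8 and Exception helpers:

  v8::Isolate* isolate = v8::Isolate::GetCurrent();
  v8::HandleScope scope(isolate);
  isolate->ThrowException(v8::Exception::TypeError(
      v8::String::NewFromUtf8(isolate, "expected a string argument")));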
@@ -602,8 +563,7 @@ ResourceConstraints::ResourceConstraints()
: max_young_space_size_(0),
max_old_space_size_(0),
max_executable_size_(0),
- stack_limit_(NULL),
- is_memory_constrained_() { }
+ stack_limit_(NULL) { }
bool SetResourceConstraints(ResourceConstraints* constraints) {
@@ -614,7 +574,8 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
int max_executable_size = constraints->max_executable_size();
if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) {
// After initialization it's too late to change Heap constraints.
- ASSERT(!isolate->IsInitialized());
+ // TODO(rmcilroy): fix this assert.
+ // ASSERT(!isolate->IsInitialized());
bool result = isolate->heap()->ConfigureHeap(young_space_size / 2,
old_gen_size,
max_executable_size);
@@ -624,16 +585,11 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
}
- if (constraints->is_memory_constrained().has_value) {
- isolate->set_is_memory_constrained(
- constraints->is_memory_constrained().value);
- }
return true;
}
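With is_memory_constrained() gone, heap limits are configured only through the remaining setters, and (per the TODO above) must currently happen before heap initialization. A sketch; value units follow this release's v8.h:

  v8::ResourceConstraints constraints;
  constraints.set_max_old_space_size(128 * 1024 * 1024);
  v8::SetResourceConstraints(&constraints);  // before the heap is set up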
i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
- if (IsDeadCheck(isolate, "V8::Persistent::New")) return NULL;
LOG_API(isolate, "Persistent::New");
i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef DEBUG
@@ -728,50 +684,58 @@ int HandleScope::NumberOfHandles() {
}
-i::Object** HandleScope::CreateHandle(i::Object* value) {
- return i::HandleScope::CreateHandle(i::Isolate::Current(), value);
+i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
+ return i::HandleScope::CreateHandle(isolate, value);
+}
+
+
+i::Object** HandleScope::CreateHandle(i::HeapObject* heap_object,
+ i::Object* value) {
+ ASSERT(heap_object->IsHeapObject());
+ return i::HandleScope::CreateHandle(heap_object->GetIsolate(), value);
}
-i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
- ASSERT(isolate == i::Isolate::Current());
- return i::HandleScope::CreateHandle(isolate, value);
+EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ escape_slot_ = CreateHandle(isolate, isolate->heap()->the_hole_value());
+ Initialize(v8_isolate);
}
-i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
- ASSERT(value->IsHeapObject());
- return reinterpret_cast<i::Object**>(
- i::HandleScope::CreateHandle(value->GetIsolate(), value));
+i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
+ ApiCheck(*escape_slot_ == isolate_->heap()->the_hole_value(),
+ "EscapeableHandleScope::Escape",
+ "Escape value set twice");
+ if (escape_value == NULL) {
+ *escape_slot_ = isolate_->heap()->undefined_value();
+ return NULL;
+ }
+ *escape_slot_ = *escape_value;
+ return escape_slot_;
}
void Context::Enter() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Context::Enter()")) return;
ENTER_V8(isolate);
-
isolate->handle_scope_implementer()->EnterContext(env);
-
isolate->handle_scope_implementer()->SaveContext(isolate->context());
isolate->set_context(*env);
}
void Context::Exit() {
- // Exit is essentially a static function and doesn't use the
- // receiver, so we have to get the current isolate from the thread
- // local.
+ // TODO(dcarney): fix this once chrome is fixed.
i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
-
- if (!ApiCheck(isolate->handle_scope_implementer()->LeaveLastContext(),
+ i::Handle<i::Context> context = i::Handle<i::Context>::null();
+ ENTER_V8(isolate);
+ if (!ApiCheck(isolate->handle_scope_implementer()->LeaveContext(context),
"v8::Context::Exit()",
"Cannot exit non-entered context")) {
return;
}
-
// Content of 'last_context' could be NULL.
i::Context* last_context =
isolate->handle_scope_implementer()->RestoreContext();
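The EscapableHandleScope added in this hunk is the replacement for HandleScope::Close() when returning a handle out of a scope; the ApiCheck above means Escape() may be called at most once per scope. Typical usage, as a sketch assuming this release's String::NewFromUtf8:

  v8::Local<v8::String> MakeGreeting(v8::Isolate* isolate) {
    v8::EscapableHandleScope scope(isolate);
    v8::Local<v8::String> greeting =
        v8::String::NewFromUtf8(isolate, "hello");
    return scope.Escape(greeting);  // promotes the handle to the outer scope
  }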
@@ -797,7 +761,7 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
bool can_grow,
const char* location) {
i::Handle<i::Context> env = Utils::OpenHandle(context);
- bool ok = !IsDeadCheck(env->GetIsolate(), location) &&
+ bool ok =
ApiCheck(env->IsNativeContext(), location, "Not a native context") &&
ApiCheck(index >= 0, location, "Negative index");
if (!ok) return i::Handle<i::FixedArray>();
@@ -974,7 +938,6 @@ void Template::Set(v8::Handle<String> name,
v8::Handle<Data> value,
v8::PropertyAttribute attribute) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Template::Set()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
const int kSize = 3;
@@ -993,7 +956,6 @@ void Template::SetAccessorProperty(
v8::PropertyAttribute attribute,
v8::AccessControl access_control) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Template::SetAccessor()")) return;
ENTER_V8(isolate);
ASSERT(!name.IsEmpty());
ASSERT(!getter.IsEmpty() || !setter.IsEmpty());
@@ -1019,9 +981,6 @@ static void InitializeFunctionTemplate(
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::PrototypeTemplate()")) {
- return Local<ObjectTemplate>();
- }
ENTER_V8(isolate);
i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
isolate);
@@ -1035,7 +994,6 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::Inherit()")) return;
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value));
}
@@ -1061,7 +1019,9 @@ static Local<FunctionTemplate> FunctionTemplateNew(
}
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
Utils::ToLocal(obj)->SetCallHandler(callback, data);
}
obj->set_length(length);
@@ -1278,7 +1238,6 @@ int TypeSwitch::match(v8::Handle<Value> value) {
void FunctionTemplate::SetCallHandler(FunctionCallback callback,
v8::Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
@@ -1286,7 +1245,9 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
obj->set_data(*Utils::OpenHandle(*data));
Utils::OpenHandle(this)->set_call_code(*obj);
}
@@ -1324,7 +1285,9 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
isolate->factory()->NewExecutableAccessorInfo();
SET_FIELD_WRAPPED(obj, set_getter, getter);
SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
obj->set_data(*Utils::OpenHandle(*data));
return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
}
@@ -1349,8 +1312,7 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::InstanceTemplate()")
- || EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
+ if (EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
return Local<ObjectTemplate>();
ENTER_V8(isolate);
i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this);
@@ -1367,7 +1329,6 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
void FunctionTemplate::SetLength(int length) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetLength()")) return;
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_length(length);
}
@@ -1375,7 +1336,6 @@ void FunctionTemplate::SetLength(int length) {
void FunctionTemplate::SetClassName(Handle<String> name) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetClassName()")) return;
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name));
}
@@ -1383,9 +1343,6 @@ void FunctionTemplate::SetClassName(Handle<String> name) {
void FunctionTemplate::SetHiddenPrototype(bool value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetHiddenPrototype()")) {
- return;
- }
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_hidden_prototype(value);
}
@@ -1393,9 +1350,6 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
void FunctionTemplate::ReadOnlyPrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::ReadOnlyPrototype()")) {
- return;
- }
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_read_only_prototype(true);
}
@@ -1403,9 +1357,6 @@ void FunctionTemplate::ReadOnlyPrototype() {
void FunctionTemplate::RemovePrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::RemovePrototype()")) {
- return;
- }
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_remove_prototype(true);
}
@@ -1422,9 +1373,6 @@ Local<ObjectTemplate> ObjectTemplate::New() {
Local<ObjectTemplate> ObjectTemplate::New(
v8::Handle<FunctionTemplate> constructor) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::New()")) {
- return Local<ObjectTemplate>();
- }
EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()");
LOG_API(isolate, "ObjectTemplate::New");
ENTER_V8(isolate);
@@ -1495,7 +1443,6 @@ static bool TemplateSetAccessor(
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(template_obj)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return false;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(
@@ -1551,9 +1498,6 @@ void ObjectTemplate::SetNamedPropertyHandler(
NamedPropertyEnumeratorCallback enumerator,
Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
- return;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1571,7 +1515,9 @@ void ObjectTemplate::SetNamedPropertyHandler(
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
obj->set_data(*Utils::OpenHandle(*data));
cons->set_named_property_handler(*obj);
}
@@ -1579,7 +1525,6 @@ void ObjectTemplate::SetNamedPropertyHandler(
void ObjectTemplate::MarkAsUndetectable() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::MarkAsUndetectable()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1596,9 +1541,6 @@ void ObjectTemplate::SetAccessCheckCallbacks(
Handle<Value> data,
bool turned_on_by_default) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessCheckCallbacks()")) {
- return;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1611,7 +1553,9 @@ void ObjectTemplate::SetAccessCheckCallbacks(
SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
info->set_data(*Utils::OpenHandle(*data));
i::FunctionTemplateInfo* constructor =
@@ -1630,9 +1574,6 @@ void ObjectTemplate::SetIndexedPropertyHandler(
IndexedPropertyEnumeratorCallback enumerator,
Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) {
- return;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1650,7 +1591,9 @@ void ObjectTemplate::SetIndexedPropertyHandler(
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
obj->set_data(*Utils::OpenHandle(*data));
cons->set_indexed_property_handler(*obj);
}
@@ -1659,10 +1602,6 @@ void ObjectTemplate::SetIndexedPropertyHandler(
void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::ObjectTemplate::SetCallAsFunctionHandler()")) {
- return;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1674,26 +1613,21 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
obj->set_data(*Utils::OpenHandle(*data));
cons->set_instance_call_handler(*obj);
}
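The recurring `data = v8::Undefined(...)` edits in these hunks reflect that the primitive singletons are now isolate-scoped rather than read from the current isolate implicitly. A sketch of the new spellings, with `isolate` assumed to be a live v8::Isolate*:

  v8::Handle<v8::Primitive> undef = v8::Undefined(isolate);
  v8::Handle<v8::Boolean> yes = v8::True(isolate);
  v8::Handle<v8::Boolean> no = v8::False(isolate);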
int ObjectTemplate::InternalFieldCount() {
- if (IsDeadCheck(Utils::OpenHandle(this)->GetIsolate(),
- "v8::ObjectTemplate::InternalFieldCount()")) {
- return 0;
- }
return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value();
}
void ObjectTemplate::SetInternalFieldCount(int value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetInternalFieldCount()")) {
- return;
- }
if (!ApiCheck(i::Smi::IsValid(value),
"v8::ObjectTemplate::SetInternalFieldCount()",
"Invalid internal field count")) {
@@ -1713,10 +1647,13 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
// --- S c r i p t D a t a ---
-ScriptData* ScriptData::PreCompile(const char* input, int length) {
+ScriptData* ScriptData::PreCompile(v8::Isolate* isolate,
+ const char* input,
+ int length) {
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
- return i::PreParserApi::PreParse(i::Isolate::Current(), &stream);
+ return i::PreParserApi::PreParse(
+ reinterpret_cast<i::Isolate*>(isolate), &stream);
}
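The updated signature threads the isolate through explicitly instead of reading Isolate::Current(). A sketch of a call under the new API; `isolate` is assumed to be a live v8::Isolate*:

  #include <cstring>
  const char* source = "function add(a, b) { return a + b; }";
  v8::ScriptData* pre_data = v8::ScriptData::PreCompile(
      isolate, source, static_cast<int>(strlen(source)));
  // ... pass pre_data to Script::New()/Compile(); the caller owns it ...
  delete pre_data;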
@@ -1763,13 +1700,13 @@ Local<Script> Script::New(v8::Handle<String> source,
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
v8::Handle<String> script_data) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Isolate* isolate = str->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
LOG_API(isolate, "Script::New");
ENTER_V8(isolate);
i::SharedFunctionInfo* raw_result = NULL;
{ i::HandleScope scope(isolate);
- i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Handle<i::Object> name_obj;
int line_offset = 0;
int column_offset = 0;
@@ -1786,8 +1723,9 @@ Local<Script> Script::New(v8::Handle<String> source,
static_cast<int>(origin->ResourceColumnOffset()->Value());
}
if (!origin->ResourceIsSharedCrossOrigin().IsEmpty()) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
is_shared_cross_origin =
- origin->ResourceIsSharedCrossOrigin() == v8::True();
+ origin->ResourceIsSharedCrossOrigin() == v8::True(v8_isolate);
}
}
EXCEPTION_PREAMBLE(isolate);
@@ -1831,7 +1769,8 @@ Local<Script> Script::Compile(v8::Handle<String> source,
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
v8::Handle<String> script_data) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Isolate* isolate = str->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
LOG_API(isolate, "Script::Compile");
ENTER_V8(isolate);
@@ -1858,7 +1797,11 @@ Local<Script> Script::Compile(v8::Handle<String> source,
Local<Value> Script::Run() {
- i::Isolate* isolate = i::Isolate::Current();
+ // If execution is terminating, Compile(script)->Run() requires this check.
+ if (this == NULL) return Local<Value>();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
LOG_API(isolate, "Script::Run");
ENTER_V8(isolate);
@@ -1867,7 +1810,6 @@ Local<Value> Script::Run() {
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSFunction> fun;
if (obj->IsSharedFunctionInfo()) {
i::Handle<i::SharedFunctionInfo>
@@ -1905,7 +1847,9 @@ static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
Local<Value> Script::Id() {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Id()", return Local<Value>());
LOG_API(isolate, "Script::Id");
i::Object* raw_id = NULL;
@@ -1922,7 +1866,9 @@ Local<Value> Script::Id() {
int Script::GetId() {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Id()", return -1);
LOG_API(isolate, "Script::Id");
{
@@ -1935,10 +1881,11 @@ int Script::GetId() {
int Script::GetLineNumber(int code_pos) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::GetLineNumber()", return -1);
LOG_API(isolate, "Script::GetLineNumber");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsScript()) {
i::Handle<i::Script> script = i::Handle<i::Script>(i::Script::cast(*obj));
return i::GetScriptLineNumber(script, code_pos);
@@ -1949,10 +1896,11 @@ int Script::GetLineNumber(int code_pos) {
Handle<Value> Script::GetScriptName() {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::GetName()", return Handle<String>());
LOG_API(isolate, "Script::GetName");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsScript()) {
i::Object* name = i::Script::cast(*obj)->name();
return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
@@ -1963,7 +1911,9 @@ Handle<Value> Script::GetScriptName() {
void Script::SetData(v8::Handle<String> data) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::SetData()", return);
LOG_API(isolate, "Script::SetData");
{
@@ -1995,8 +1945,9 @@ v8::TryCatch::TryCatch()
v8::TryCatch::~TryCatch() {
ASSERT(isolate_ == i::Isolate::Current());
if (rethrow_) {
- v8::HandleScope scope(reinterpret_cast<Isolate*>(isolate_));
- v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
+ v8::Isolate* isolate = reinterpret_cast<Isolate*>(isolate_);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(isolate, Exception());
if (HasCaught() && capture_message_) {
// If an exception was caught and rethrow_ is indicated, the saved
// message, script, and location need to be restored to Isolate TLS
@@ -2006,7 +1957,7 @@ v8::TryCatch::~TryCatch() {
isolate_->RestorePendingMessageFromTryCatch(this);
}
isolate_->UnregisterTryCatchHandler(this);
- v8::ThrowException(exc);
+ reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc);
ASSERT(!isolate_->thread_local_top()->rethrowing_message_);
} else {
isolate_->UnregisterTryCatchHandler(this);
@@ -2032,7 +1983,7 @@ bool v8::TryCatch::HasTerminated() const {
v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
if (!HasCaught()) return v8::Local<v8::Value>();
rethrow_ = true;
- return v8::Undefined();
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate_));
}
@@ -2056,7 +2007,7 @@ v8::Local<Value> v8::TryCatch::StackTrace() const {
i::HandleScope scope(isolate_);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
i::Handle<i::String> name = isolate_->factory()->stack_string();
- if (!obj->HasProperty(*name)) return v8::Local<Value>();
+ if (!i::JSReceiver::HasProperty(obj, name)) return v8::Local<Value>();
i::Handle<i::Object> value = i::GetProperty(isolate_, obj, name);
if (value.is_null()) return v8::Local<Value>();
return v8::Utils::ToLocal(scope.CloseAndEscape(value));
@@ -2116,9 +2067,6 @@ Local<String> Message::Get() const {
v8::Handle<Value> Message::GetScriptResourceName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceName()")) {
- return Local<String>();
- }
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
@@ -2135,9 +2083,6 @@ v8::Handle<Value> Message::GetScriptResourceName() const {
v8::Handle<Value> Message::GetScriptData() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceData()")) {
- return Local<Value>();
- }
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
@@ -2153,9 +2098,6 @@ v8::Handle<Value> Message::GetScriptData() const {
v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStackTrace()")) {
- return Local<v8::StackTrace>();
- }
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
@@ -2215,7 +2157,6 @@ int Message::GetLineNumber() const {
int Message::GetStartPosition() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStartPosition()")) return 0;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
@@ -2226,7 +2167,6 @@ int Message::GetStartPosition() const {
int Message::GetEndPosition() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetEndPosition()")) return 0;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
@@ -2237,9 +2177,6 @@ int Message::GetEndPosition() const {
int Message::GetStartColumn() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStartColumn()")) {
- return kNoColumnInfo;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -2255,7 +2192,6 @@ int Message::GetStartColumn() const {
int Message::GetEndColumn() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetEndColumn()")) return kNoColumnInfo;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -2275,7 +2211,6 @@ int Message::GetEndColumn() const {
bool Message::IsSharedCrossOrigin() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::IsSharedCrossOrigin()")) return 0;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
@@ -2307,7 +2242,6 @@ Local<String> Message::GetSourceLine() const {
void Message::PrintCurrentStackTrace(FILE* out) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Message::PrintCurrentStackTrace()")) return;
ENTER_V8(isolate);
isolate->PrintCurrentStackTrace(out);
}
@@ -2317,9 +2251,6 @@ void Message::PrintCurrentStackTrace(FILE* out) {
Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::GetFrame()")) {
- return Local<StackFrame>();
- }
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
@@ -2331,7 +2262,6 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
int StackTrace::GetFrameCount() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::GetFrameCount()")) return -1;
ENTER_V8(isolate);
return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
}
@@ -2339,7 +2269,6 @@ int StackTrace::GetFrameCount() const {
Local<Array> StackTrace::AsArray() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::AsArray()")) Local<Array>();
ENTER_V8(isolate);
return Utils::ToLocal(Utils::OpenHandle(this));
}
@@ -2348,9 +2277,6 @@ Local<Array> StackTrace::AsArray() {
Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
StackTraceOptions options) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StackTrace::CurrentStackTrace()")) {
- Local<StackTrace>();
- }
ENTER_V8(isolate);
i::Handle<i::JSArray> stackTrace =
isolate->CaptureCurrentStackTrace(frame_limit, options);
@@ -2362,9 +2288,6 @@ Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
int StackFrame::GetLineNumber() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetLineNumber()")) {
- return Message::kNoLineNumberInfo;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2378,9 +2301,6 @@ int StackFrame::GetLineNumber() const {
int StackFrame::GetColumn() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetColumn()")) {
- return Message::kNoColumnInfo;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2394,9 +2314,6 @@ int StackFrame::GetColumn() const {
int StackFrame::GetScriptId() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptId()")) {
- return Message::kNoScriptIdInfo;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2410,9 +2327,6 @@ int StackFrame::GetScriptId() const {
Local<String> StackFrame::GetScriptName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptName()")) {
- return Local<String>();
- }
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2426,9 +2340,6 @@ Local<String> StackFrame::GetScriptName() const {
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptNameOrSourceURL()")) {
- return Local<String>();
- }
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2442,9 +2353,6 @@ Local<String> StackFrame::GetScriptNameOrSourceURL() const {
Local<String> StackFrame::GetFunctionName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetFunctionName()")) {
- return Local<String>();
- }
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2458,7 +2366,6 @@ Local<String> StackFrame::GetFunctionName() const {
bool StackFrame::IsEval() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::IsEval()")) return false;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2469,7 +2376,6 @@ bool StackFrame::IsEval() const {
bool StackFrame::IsConstructor() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::IsConstructor()")) return false;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2504,9 +2410,6 @@ Local<Value> JSON::Parse(Local<String> json_string) {
// --- D a t a ---
bool Value::FullIsUndefined() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
- return false;
- }
bool result = Utils::OpenHandle(this)->IsUndefined();
ASSERT_EQ(result, QuickIsUndefined());
return result;
@@ -2514,7 +2417,6 @@ bool Value::FullIsUndefined() const {
bool Value::FullIsNull() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
bool result = Utils::OpenHandle(this)->IsNull();
ASSERT_EQ(result, QuickIsNull());
return result;
@@ -2522,27 +2424,21 @@ bool Value::FullIsNull() const {
bool Value::IsTrue() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsTrue()")) return false;
return Utils::OpenHandle(this)->IsTrue();
}
bool Value::IsFalse() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFalse()")) return false;
return Utils::OpenHandle(this)->IsFalse();
}
bool Value::IsFunction() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFunction()")) {
- return false;
- }
return Utils::OpenHandle(this)->IsJSFunction();
}
bool Value::FullIsString() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsString()")) return false;
bool result = Utils::OpenHandle(this)->IsString();
ASSERT_EQ(result, QuickIsString());
return result;
@@ -2550,20 +2446,16 @@ bool Value::FullIsString() const {
bool Value::IsSymbol() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsSymbol()")) return false;
return Utils::OpenHandle(this)->IsSymbol();
}
bool Value::IsArray() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArray()")) return false;
return Utils::OpenHandle(this)->IsJSArray();
}
bool Value::IsArrayBuffer() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
- return false;
return Utils::OpenHandle(this)->IsJSArrayBuffer();
}
@@ -2574,8 +2466,6 @@ bool Value::IsArrayBufferView() const {
bool Value::IsTypedArray() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
- return false;
return Utils::OpenHandle(this)->IsJSTypedArray();
}
@@ -2594,8 +2484,6 @@ F(Uint8ClampedArray, kExternalPixelArray)
#define VALUE_IS_TYPED_ARRAY(TypedArray, type_const) \
bool Value::Is##TypedArray() const { \
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::Is" #TypedArray "()")) \
- return false; \
i::Handle<i::Object> obj = Utils::OpenHandle(this); \
if (!obj->IsJSTypedArray()) return false; \
return i::JSTypedArray::cast(*obj)->type() == type_const; \
@@ -2612,35 +2500,26 @@ bool Value::IsDataView() const {
bool Value::IsObject() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
return Utils::OpenHandle(this)->IsJSObject();
}
bool Value::IsNumber() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNumber()")) return false;
return Utils::OpenHandle(this)->IsNumber();
}
bool Value::IsBoolean() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsBoolean()")) {
- return false;
- }
return Utils::OpenHandle(this)->IsBoolean();
}
bool Value::IsExternal() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
- return false;
- }
return Utils::OpenHandle(this)->IsExternal();
}
bool Value::IsInt32() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsInt32()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return true;
if (obj->IsNumber()) {
@@ -2657,7 +2536,6 @@ bool Value::IsInt32() const {
bool Value::IsUint32() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUint32()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
if (obj->IsNumber()) {
@@ -2675,7 +2553,6 @@ bool Value::IsUint32() const {
bool Value::IsDate() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsDate()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->HasSpecificClassOf(isolate->heap()->Date_string());
}
@@ -2683,7 +2560,6 @@ bool Value::IsDate() const {
bool Value::IsStringObject() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsStringObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->HasSpecificClassOf(isolate->heap()->String_string());
}
@@ -2693,7 +2569,6 @@ bool Value::IsSymbolObject() const {
// TODO(svenpanne): these and other test functions should be written such
// that they do not use Isolate::Current().
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsSymbolObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->HasSpecificClassOf(isolate->heap()->Symbol_string());
}
@@ -2701,7 +2576,6 @@ bool Value::IsSymbolObject() const {
bool Value::IsNumberObject() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsNumberObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->HasSpecificClassOf(isolate->heap()->Number_string());
}
@@ -2729,7 +2603,6 @@ static bool CheckConstructor(i::Isolate* isolate,
bool Value::IsNativeError() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsNativeError()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsJSObject()) {
i::Handle<i::JSObject> js_obj(i::JSObject::cast(*obj));
@@ -2748,14 +2621,12 @@ bool Value::IsNativeError() const {
bool Value::IsBooleanObject() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsBooleanObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->HasSpecificClassOf(isolate->heap()->Boolean_string());
}
bool Value::IsRegExp() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsRegExp()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsJSRegExp();
}
@@ -2768,9 +2639,6 @@ Local<String> Value::ToString() const {
str = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToString()")) {
- return Local<String>();
- }
LOG_API(isolate, "ToString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2788,9 +2656,6 @@ Local<String> Value::ToDetailString() const {
str = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToDetailString()")) {
- return Local<String>();
- }
LOG_API(isolate, "ToDetailString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2808,9 +2673,6 @@ Local<v8::Object> Value::ToObject() const {
val = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToObject()")) {
- return Local<v8::Object>();
- }
LOG_API(isolate, "ToObject");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2827,9 +2689,6 @@ Local<Boolean> Value::ToBoolean() const {
return ToApiHandle<Boolean>(obj);
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToBoolean()")) {
- return Local<Boolean>();
- }
LOG_API(isolate, "ToBoolean");
ENTER_V8(isolate);
i::Handle<i::Object> val =
@@ -2846,9 +2705,6 @@ Local<Number> Value::ToNumber() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToNumber()")) {
- return Local<Number>();
- }
LOG_API(isolate, "ToNumber");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2866,7 +2722,6 @@ Local<Integer> Value::ToInteger() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToInteger()")) return Local<Integer>();
LOG_API(isolate, "ToInteger");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2886,7 +2741,6 @@ void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
void External::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
ApiCheck(Utils::OpenHandle(that)->IsExternal(),
"v8::External::Cast()",
"Could not convert to external");
@@ -2894,7 +2748,6 @@ void External::CheckCast(v8::Value* that) {
void v8::Object::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Object::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSObject(),
"v8::Object::Cast()",
@@ -2903,7 +2756,6 @@ void v8::Object::CheckCast(Value* that) {
void v8::Function::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Function::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSFunction(),
"v8::Function::Cast()",
@@ -2912,7 +2764,6 @@ void v8::Function::CheckCast(Value* that) {
void v8::String::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::String::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsString(),
"v8::String::Cast()",
@@ -2921,7 +2772,6 @@ void v8::String::CheckCast(v8::Value* that) {
void v8::Symbol::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Symbol::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsSymbol(),
"v8::Symbol::Cast()",
@@ -2930,7 +2780,6 @@ void v8::Symbol::CheckCast(v8::Value* that) {
void v8::Number::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Number::Cast()",
@@ -2939,7 +2788,6 @@ void v8::Number::CheckCast(v8::Value* that) {
void v8::Integer::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Integer::Cast()",
@@ -2948,7 +2796,6 @@ void v8::Integer::CheckCast(v8::Value* that) {
void v8::Array::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Array::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSArray(),
"v8::Array::Cast()",
@@ -2957,7 +2804,6 @@ void v8::Array::CheckCast(Value* that) {
void v8::ArrayBuffer::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::ArrayBuffer::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSArrayBuffer(),
"v8::ArrayBuffer::Cast()",
@@ -2974,7 +2820,6 @@ void v8::ArrayBufferView::CheckCast(Value* that) {
void v8::TypedArray::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::TypedArray::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSTypedArray(),
"v8::TypedArray::Cast()",
@@ -2984,8 +2829,6 @@ void v8::TypedArray::CheckCast(Value* that) {
#define CHECK_TYPED_ARRAY_CAST(ApiClass, typeConst) \
void v8::ApiClass::CheckCast(Value* that) { \
- if (IsDeadCheck(i::Isolate::Current(), "v8::" #ApiClass "::Cast()")) \
- return; \
i::Handle<i::Object> obj = Utils::OpenHandle(that); \
ApiCheck(obj->IsJSTypedArray() && \
i::JSTypedArray::cast(*obj)->type() == typeConst, \
@@ -3009,7 +2852,6 @@ void v8::DataView::CheckCast(Value* that) {
void v8::Date::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_string()),
"v8::Date::Cast()",
@@ -3019,7 +2861,6 @@ void v8::Date::CheckCast(v8::Value* that) {
void v8::StringObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StringObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_string()),
"v8::StringObject::Cast()",
@@ -3029,7 +2870,6 @@ void v8::StringObject::CheckCast(v8::Value* that) {
void v8::SymbolObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::SymbolObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
"v8::SymbolObject::Cast()",
@@ -3039,7 +2879,6 @@ void v8::SymbolObject::CheckCast(v8::Value* that) {
void v8::NumberObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::NumberObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_string()),
"v8::NumberObject::Cast()",
@@ -3049,7 +2888,6 @@ void v8::NumberObject::CheckCast(v8::Value* that) {
void v8::BooleanObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::BooleanObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
"v8::BooleanObject::Cast()",
@@ -3058,7 +2896,6 @@ void v8::BooleanObject::CheckCast(v8::Value* that) {
void v8::RegExp::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSRegExp(),
"v8::RegExp::Cast()",
@@ -3078,9 +2915,6 @@ double Value::NumberValue() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::NumberValue()")) {
- return i::OS::nan_value();
- }
LOG_API(isolate, "NumberValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3098,7 +2932,6 @@ int64_t Value::IntegerValue() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IntegerValue()")) return 0;
LOG_API(isolate, "IntegerValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3120,7 +2953,6 @@ Local<Int32> Value::ToInt32() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToInt32()")) return Local<Int32>();
LOG_API(isolate, "ToInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3138,7 +2970,6 @@ Local<Uint32> Value::ToUint32() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToUint32()")) return Local<Uint32>();
LOG_API(isolate, "ToUInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3156,7 +2987,6 @@ Local<Uint32> Value::ToArrayIndex() const {
return Local<Uint32>();
}
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToArrayIndex()")) return Local<Uint32>();
LOG_API(isolate, "ToArrayIndex");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3184,7 +3014,6 @@ int32_t Value::Int32Value() const {
return i::Smi::cast(*obj)->value();
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Int32Value()")) return 0;
LOG_API(isolate, "Int32Value (slow)");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3202,9 +3031,8 @@ int32_t Value::Int32Value() const {
bool Value::Equals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Equals()")
- || EmptyCheck("v8::Value::Equals()", this)
- || EmptyCheck("v8::Value::Equals()", that)) {
+ if (EmptyCheck("v8::Value::Equals()", this) ||
+ EmptyCheck("v8::Value::Equals()", that)) {
return false;
}
LOG_API(isolate, "Equals");
@@ -3229,9 +3057,8 @@ bool Value::Equals(Handle<Value> that) const {
bool Value::StrictEquals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::StrictEquals()")
- || EmptyCheck("v8::Value::StrictEquals()", this)
- || EmptyCheck("v8::Value::StrictEquals()", that)) {
+ if (EmptyCheck("v8::Value::StrictEquals()", this) ||
+ EmptyCheck("v8::Value::StrictEquals()", that)) {
return false;
}
LOG_API(isolate, "StrictEquals");
@@ -3259,13 +3086,25 @@ bool Value::StrictEquals(Handle<Value> that) const {
}
+bool Value::SameValue(Handle<Value> that) const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (EmptyCheck("v8::Value::SameValue()", this) ||
+ EmptyCheck("v8::Value::SameValue()", that)) {
+ return false;
+ }
+ LOG_API(isolate, "SameValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> other = Utils::OpenHandle(*that);
+ return obj->SameValue(*other);
+}
+
+
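The new SameValue delegates to i::Object::SameValue and thus follows the ES5 SameValue algorithm: NaN compares equal to itself and +0 differs from -0, in both cases the opposite of StrictEquals. A minimal illustrative sketch, not part of this patch:

    #include <limits>
    #include "v8.h"

    void SameValueExamples() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      // true: NaN is SameValue to NaN (StrictEquals would say false).
      bool nan_same =
          v8::Number::New(nan)->SameValue(v8::Number::New(nan));
      // false: +0 and -0 are distinguished (StrictEquals would say true).
      bool zeros_same =
          v8::Number::New(0.0)->SameValue(v8::Number::New(-0.0));
      (void) nan_same;
      (void) zeros_same;
    }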
uint32_t Value::Uint32Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Uint32Value()")) return 0;
LOG_API(isolate, "Uint32Value");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3625,7 +3464,7 @@ bool v8::Object::Has(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasProperty()", return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return self->HasElement(index);
+ return i::JSReceiver::HasElement(self, index);
}
@@ -3679,8 +3518,8 @@ bool v8::Object::HasOwnProperty(Handle<String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
return false);
- return Utils::OpenHandle(this)->HasLocalProperty(
- *Utils::OpenHandle(*key));
+ return i::JSReceiver::HasLocalProperty(
+ Utils::OpenHandle(this), Utils::OpenHandle(*key));
}
@@ -3688,9 +3527,8 @@ bool v8::Object::HasRealNamedProperty(Handle<String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
return false);
- return Utils::OpenHandle(this)->HasRealNamedProperty(
- isolate,
- *Utils::OpenHandle(*key));
+ return i::JSObject::HasRealNamedProperty(Utils::OpenHandle(this),
+ Utils::OpenHandle(*key));
}
@@ -3698,7 +3536,7 @@ bool v8::Object::HasRealIndexedProperty(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasRealIndexedProperty()",
return false);
- return Utils::OpenHandle(this)->HasRealElementProperty(isolate, index);
+ return i::JSObject::HasRealElementProperty(Utils::OpenHandle(this), index);
}
@@ -3708,9 +3546,8 @@ bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
"v8::Object::HasRealNamedCallbackProperty()",
return false);
ENTER_V8(isolate);
- return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
- isolate,
- *Utils::OpenHandle(*key));
+ return i::JSObject::HasRealNamedCallbackProperty(Utils::OpenHandle(this),
+ Utils::OpenHandle(*key));
}
@@ -3813,7 +3650,7 @@ Local<v8::Object> v8::Object::Clone() {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSObject> result = i::Copy(self);
+ i::Handle<i::JSObject> result = i::JSObject::Copy(self);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
@@ -4113,7 +3950,7 @@ bool v8::Object::IsCallable() {
}
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Value> recv,
int argc,
v8::Handle<v8::Value> argv[]) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -4141,7 +3978,7 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
}
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned = i::Execution::Call(
- isolate, fun, recv_obj, argc, args, &has_pending_exception);
+ isolate, fun, recv_obj, argc, args, &has_pending_exception, true);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
return Utils::ToLocal(scope.CloseAndEscape(returned));
}
@@ -4225,7 +4062,7 @@ Local<v8::Object> Function::NewInstance(int argc,
}
-Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
+Local<v8::Value> Function::Call(v8::Handle<v8::Value> recv, int argc,
v8::Handle<v8::Value> argv[]) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
@@ -4242,7 +4079,7 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned = i::Execution::Call(
- isolate, fun, recv_obj, argc, args, &has_pending_exception);
+ isolate, fun, recv_obj, argc, args, &has_pending_exception, true);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
raw_result = *returned;
}
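The receiver parameter of Function::Call (and of Object::CallAsFunction above) is widened from Handle<Object> to Handle<Value>, so a primitive receiver can now be passed without a cast. An illustrative caller, assuming fn is obtained elsewhere:

    void CallWithPrimitiveReceiver(v8::Handle<v8::Function> fn) {
      // A number as `this`: valid now that recv is a Handle<Value>.
      v8::Handle<v8::Value> recv = v8::Number::New(42);
      v8::Handle<v8::Value> argv[] = { v8::String::New("arg") };
      fn->Call(recv, 1, argv);
    }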
@@ -4274,6 +4111,29 @@ Handle<Value> Function::GetInferredName() const {
}
+Handle<Value> Function::GetDisplayName() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Function::GetDisplayName()",
+ return ToApiHandle<Primitive>(
+ isolate->factory()->undefined_value()));
+ ENTER_V8(isolate);
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ i::Handle<i::String> property_name =
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("displayName"));
+ i::LookupResult lookup(isolate);
+ func->LookupRealNamedProperty(*property_name, &lookup);
+ if (lookup.IsFound()) {
+ i::Object* value = lookup.GetLazyValue();
+ if (value && value->IsString()) {
+ i::String* name = i::String::cast(value);
+ if (name->length() > 0) return Utils::ToLocal(i::Handle<i::String>(name));
+ }
+ }
+ return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
+}
+
+
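GetDisplayName above surfaces a function's real named "displayName" property when it holds a non-empty string, and undefined otherwise. A hypothetical embedder-side use, with String::Utf8Value from the existing public API:

    #include <cstdio>

    void PrintDisplayName(v8::Handle<v8::Function> fn) {
      v8::Handle<v8::Value> name = fn->GetDisplayName();
      if (name->IsString()) {
        v8::String::Utf8Value utf8(name);
        std::printf("displayName: %s\n", *utf8);
      }
    }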
ScriptOrigin Function::GetScriptOrigin() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {
@@ -4312,12 +4172,20 @@ int Function::GetScriptColumnNumber() const {
}
+bool Function::IsBuiltin() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ return func->IsBuiltin();
+}
+
+
Handle<Value> Function::GetScriptId() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (!func->shared()->script()->IsScript())
- return v8::Undefined();
+ i::Isolate* isolate = func->GetIsolate();
+ if (!func->shared()->script()->IsScript()) {
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return Utils::ToLocal(i::Handle<i::Object>(script->id(), func->GetIsolate()));
+ return Utils::ToLocal(i::Handle<i::Object>(script->id(), isolate));
}
@@ -4331,16 +4199,12 @@ int Function::ScriptId() const {
int String::Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
return str->length();
}
bool String::IsOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsOneByte()")) {
- return false;
- }
return str->HasOnlyOneByteChars();
}
@@ -4456,10 +4320,6 @@ class ContainsOnlyOneByteHelper {
bool String::ContainsOnlyOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(),
- "v8::String::ContainsOnlyOneByte()")) {
- return false;
- }
if (str->HasOnlyOneByteChars()) return true;
ContainsOnlyOneByteHelper helper;
return helper.Check(*str);
@@ -4663,7 +4523,6 @@ static int Utf8Length(i::String* str, i::Isolate* isolate) {
int String::Utf8Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
i::Isolate* isolate = str->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::Utf8Length()")) return 0;
return v8::Utf8Length(*str, isolate);
}
@@ -4849,7 +4708,6 @@ int String::WriteUtf8(char* buffer,
int* nchars_ref,
int options) const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
@@ -4894,40 +4752,6 @@ int String::WriteUtf8(char* buffer,
}
-int String::WriteAscii(char* buffer,
- int start,
- int length,
- int options) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
- LOG_API(isolate, "String::WriteAscii");
- ENTER_V8(isolate);
- ASSERT(start >= 0 && length >= -1);
- i::Handle<i::String> str = Utils::OpenHandle(this);
- isolate->string_tracker()->RecordWrite(str);
- if (options & HINT_MANY_WRITES_EXPECTED) {
- FlattenString(str); // Flatten the string for efficiency.
- }
-
- int end = length;
- if ((length == -1) || (length > str->length() - start)) {
- end = str->length() - start;
- }
- if (end < 0) return 0;
- i::StringCharacterStream write_stream(*str, isolate->write_iterator(), start);
- int i;
- for (i = 0; i < end; i++) {
- char c = static_cast<char>(write_stream.GetNext());
- if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' ';
- buffer[i] = c;
- }
- if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length)) {
- buffer[i] = '\0';
- }
- return i;
-}
-
-
template<typename CharType>
static inline int WriteHelper(const String* string,
CharType* buffer,
@@ -4935,7 +4759,6 @@ static inline int WriteHelper(const String* string,
int length,
int options) {
i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::Write()")) return 0;
LOG_API(isolate, "String::Write");
ENTER_V8(isolate);
ASSERT(start >= 0 && length >= -1);
@@ -4977,9 +4800,6 @@ int String::Write(uint16_t* buffer,
bool v8::String::IsExternal() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternal()")) {
- return false;
- }
EnsureInitializedForIsolate(str->GetIsolate(), "v8::String::IsExternal()");
return i::StringShape(*str).IsExternalTwoByte();
}
@@ -4987,9 +4807,6 @@ bool v8::String::IsExternal() const {
bool v8::String::IsExternalAscii() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternalAscii()")) {
- return false;
- }
return i::StringShape(*str).IsExternalAscii();
}
@@ -5035,10 +4852,6 @@ void v8::String::VerifyExternalStringResourceBase(
const v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(),
- "v8::String::GetExternalAsciiStringResource()")) {
- return NULL;
- }
if (i::StringShape(*str).IsExternalAscii()) {
const void* resource =
i::Handle<i::ExternalAsciiString>::cast(str)->resource();
@@ -5050,8 +4863,6 @@ const v8::String::ExternalAsciiStringResource*
Local<Value> Symbol::Name() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Symbol::Name()"))
- return Local<Value>();
i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
i::Handle<i::Object> name(sym->name(), sym->GetIsolate());
return Utils::ToLocal(name);
@@ -5059,21 +4870,18 @@ Local<Value> Symbol::Name() const {
double Number::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->Number();
}
bool Boolean::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Boolean::Value()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsTrue();
}
int64_t Integer::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -5084,7 +4892,6 @@ int64_t Integer::Value() const {
int32_t Int32::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Int32::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -5095,7 +4902,6 @@ int32_t Int32::Value() const {
uint32_t Uint32::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Uint32::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -5107,9 +4913,6 @@ uint32_t Uint32::Value() const {
int v8::Object::InternalFieldCount() {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (IsDeadCheck(obj->GetIsolate(), "v8::Object::InternalFieldCount()")) {
- return 0;
- }
return obj->GetInternalFieldCount();
}
@@ -5117,10 +4920,9 @@ int v8::Object::InternalFieldCount() {
static bool InternalFieldOK(i::Handle<i::JSObject> obj,
int index,
const char* location) {
- return !IsDeadCheck(obj->GetIsolate(), location) &&
- ApiCheck(index < obj->GetInternalFieldCount(),
- location,
- "Internal field out of bounds");
+ return ApiCheck(index < obj->GetInternalFieldCount(),
+ location,
+ "Internal field out of bounds");
}
@@ -5191,11 +4993,6 @@ void v8::V8::SetReturnAddressLocationResolver(
}
-bool v8::V8::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
- return SetFunctionEntryHook(Isolate::GetCurrent(), entry_hook);
-}
-
-
bool v8::V8::SetFunctionEntryHook(Isolate* ext_isolate,
FunctionEntryHook entry_hook) {
ASSERT(ext_isolate != NULL);
@@ -5256,25 +5053,8 @@ HeapStatistics::HeapStatistics(): total_heap_size_(0),
heap_size_limit_(0) { }
-void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized()) {
- // Isolate is uninitialized thus heap is not configured yet.
- heap_statistics->total_heap_size_ = 0;
- heap_statistics->total_heap_size_executable_ = 0;
- heap_statistics->total_physical_size_ = 0;
- heap_statistics->used_heap_size_ = 0;
- heap_statistics->heap_size_limit_ = 0;
- return;
- }
- Isolate* ext_isolate = reinterpret_cast<Isolate*>(isolate);
- return ext_isolate->GetHeapStatistics(heap_statistics);
-}
-
-
void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::V8::VisitExternalResources");
isolate->heap()->VisitExternalResources(visitor);
}
@@ -5298,8 +5078,6 @@ class VisitorAdapter : public i::ObjectVisitor {
void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
-
i::DisallowHeapAllocation no_allocation;
VisitorAdapter visitor_adapter(visitor);
@@ -5311,8 +5089,6 @@ void v8::V8::VisitHandlesForPartialDependence(
Isolate* exported_isolate, PersistentHandleVisitor* visitor) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
ASSERT(isolate == i::Isolate::Current());
- IsDeadCheck(isolate, "v8::V8::VisitHandlesForPartialDependence");
-
i::DisallowHeapAllocation no_allocation;
VisitorAdapter visitor_adapter(visitor);
@@ -5423,7 +5199,6 @@ Local<Context> v8::Context::New(
v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
- i::Isolate::EnsureDefaultIsolate();
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
EnsureInitializedForIsolate(isolate, "v8::Context::New()");
LOG_API(isolate, "Context::New");
@@ -5438,9 +5213,6 @@ Local<Context> v8::Context::New(
void v8::Context::SetSecurityToken(Handle<Value> token) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::SetSecurityToken()")) {
- return;
- }
ENTER_V8(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
@@ -5450,10 +5222,6 @@ void v8::Context::SetSecurityToken(Handle<Value> token) {
void v8::Context::UseDefaultSecurityToken() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::UseDefaultSecurityToken()")) {
- return;
- }
ENTER_V8(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
env->set_security_token(env->global_object());
@@ -5462,9 +5230,6 @@ void v8::Context::UseDefaultSecurityToken() {
Handle<Value> v8::Context::GetSecurityToken() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetSecurityToken()")) {
- return Handle<Value>();
- }
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Object* security_token = env->security_token();
i::Handle<i::Object> token_handle(security_token, isolate);
@@ -5494,67 +5259,42 @@ v8::Local<v8::Context> Context::GetEntered() {
if (!EnsureInitializedForIsolate(isolate, "v8::Context::GetEntered()")) {
return Local<Context>();
}
- i::Handle<i::Object> last =
- isolate->handle_scope_implementer()->LastEnteredContext();
- if (last.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(last);
- return Utils::ToLocal(context);
+ return reinterpret_cast<Isolate*>(isolate)->GetEnteredContext();
}
v8::Local<v8::Context> Context::GetCurrent() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
- return Local<Context>();
- }
return reinterpret_cast<Isolate*>(isolate)->GetCurrentContext();
}
v8::Local<v8::Context> Context::GetCalling() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCalling()")) {
- return Local<Context>();
- }
- i::Handle<i::Object> calling =
- isolate->GetCallingNativeContext();
- if (calling.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
- return Utils::ToLocal(context);
+ return reinterpret_cast<Isolate*>(isolate)->GetCallingContext();
}
v8::Local<v8::Object> Context::Global() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::Global()")) {
- return Local<v8::Object>();
- }
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
i::Handle<i::Object> global(context->global_proxy(), isolate);
return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
}
void Context::DetachGlobal() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::DetachGlobal()")) return;
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
isolate->bootstrapper()->DetachGlobal(context);
}
void Context::ReattachGlobal(Handle<Object> global_object) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::ReattachGlobal()")) return;
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
i::Handle<i::JSGlobalProxy> global_proxy =
i::Handle<i::JSGlobalProxy>::cast(Utils::OpenHandle(*global_object));
isolate->bootstrapper()->ReattachGlobal(context, global_proxy);
@@ -5562,44 +5302,23 @@ void Context::ReattachGlobal(Handle<Object> global_object) {
void Context::AllowCodeGenerationFromStrings(bool allow) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::AllowCodeGenerationFromStrings()")) {
- return;
- }
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
context->set_allow_code_gen_from_strings(
allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
}
bool Context::IsCodeGenerationFromStringsAllowed() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::IsCodeGenerationFromStringsAllowed()")) {
- return false;
- }
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
return !context->allow_code_gen_from_strings()->IsFalse();
}
void Context::SetErrorMessageForCodeGenerationFromStrings(
Handle<String> error) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::SetErrorMessageForCodeGenerationFromStrings()")) {
- return;
- }
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Handle<i::String> error_handle = Utils::OpenHandle(*error);
context->set_error_message_for_code_gen_from_strings(*error_handle);
}
@@ -5655,7 +5374,6 @@ Local<External> v8::External::New(void* value) {
void* External::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return NULL;
return ExternalValue(*Utils::OpenHandle(this));
}
@@ -5848,7 +5566,6 @@ Local<String> v8::String::NewExternal(
bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
if (i::StringShape(*obj).IsExternalTwoByte()) {
return false; // Already an external string.
}
@@ -5899,7 +5616,6 @@ bool v8::String::MakeExternal(
v8::String::ExternalAsciiStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
if (i::StringShape(*obj).IsExternalTwoByte()) {
return false; // Already an external string.
}
@@ -5937,7 +5653,12 @@ bool v8::String::CanMakeExternal() {
if (!internal::FLAG_clever_optimizations) return false;
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
+
+ // TODO(yangguo): Externalizing sliced/cons strings allocates.
+ // This rule can be removed when all code that can
+ // trigger an access check is handlified and therefore GC safe.
+ if (isolate->heap()->old_pointer_space()->Contains(*obj)) return false;
+
if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
int size = obj->Size(); // Byte size of the original string.
if (size < i::ExternalString::kShortSize) return false;
@@ -5970,7 +5691,6 @@ Local<v8::Value> v8::NumberObject::New(double value) {
double v8::NumberObject::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::NumberObject::NumberValue()")) return 0;
LOG_API(isolate, "NumberObject::NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -5994,7 +5714,6 @@ Local<v8::Value> v8::BooleanObject::New(bool value) {
bool v8::BooleanObject::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::BooleanObject::BooleanValue()")) return 0;
LOG_API(isolate, "BooleanObject::BooleanValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6015,9 +5734,6 @@ Local<v8::Value> v8::StringObject::New(Handle<String> value) {
Local<v8::String> v8::StringObject::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StringObject::StringValue()")) {
- return Local<v8::String>();
- }
LOG_API(isolate, "StringObject::StringValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6039,8 +5755,6 @@ Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Handle<Symbol> value) {
Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::SymbolObject::SymbolValue()"))
- return Local<v8::Symbol>();
LOG_API(isolate, "SymbolObject::SymbolValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6068,7 +5782,6 @@ Local<v8::Value> v8::Date::New(double time) {
double v8::Date::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Date::NumberValue()")) return 0;
LOG_API(isolate, "Date::NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
@@ -6142,10 +5855,6 @@ Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
Local<v8::String> v8::RegExp::GetSource() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::RegExp::GetSource()")) {
- return Local<v8::String>();
- }
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return Utils::ToLocal(i::Handle<i::String>(obj->Pattern()));
}
@@ -6162,9 +5871,6 @@ REGEXP_FLAG_ASSERT_EQ(kMultiline, MULTILINE);
#undef REGEXP_FLAG_ASSERT_EQ
v8::RegExp::Flags v8::RegExp::GetFlags() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::GetFlags()")) {
- return v8::RegExp::kNone;
- }
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return static_cast<RegExp::Flags>(obj->GetFlags().value());
}
@@ -6185,8 +5891,6 @@ Local<v8::Array> v8::Array::New(int length) {
uint32_t v8::Array::Length() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Array::Length()")) return 0;
i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
i::Object* length = obj->length();
if (length->IsSmi()) {
@@ -6212,7 +5916,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
EXCEPTION_PREAMBLE(isolate);
ENTER_V8(isolate);
- i::Handle<i::JSObject> result = i::Copy(paragon_handle);
+ i::Handle<i::JSObject> result = i::JSObject::Copy(paragon_handle);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
@@ -6264,8 +5968,6 @@ void v8::ArrayBuffer::Neuter() {
size_t v8::ArrayBuffer::ByteLength() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0;
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_length()->Number());
}
@@ -6315,18 +6017,7 @@ size_t v8::ArrayBufferView::ByteLength() {
}
-void* v8::ArrayBufferView::BaseAddress() {
- i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
- i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
- void* buffer_data = buffer->backing_store();
- size_t byte_offset = static_cast<size_t>(obj->byte_offset()->Number());
- return static_cast<uint8_t*>(buffer_data) + byte_offset;
-}
-
-
size_t v8::TypedArray::Length() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::TypedArray::Length()")) return 0;
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->length()->Number());
}
@@ -6584,14 +6275,12 @@ void V8::SetCaptureStackTraceForUncaughtExceptions(
void V8::SetCounterFunction(CounterLookupCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetCounterFunction()")) return;
isolate->stats_table()->SetCounterFunction(callback);
}
void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
isolate->stats_table()->SetCreateHistogramFunction(callback);
isolate->InitializeLoggingAndCounters();
isolate->counters()->ResetHistograms();
@@ -6600,7 +6289,6 @@ void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetAddHistogramSampleFunction()")) return;
isolate->stats_table()->
SetAddHistogramSampleFunction(callback);
}
@@ -6608,9 +6296,6 @@ void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
void V8::SetFailedAccessCheckCallbackFunction(
FailedAccessCheckCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetFailedAccessCheckCallbackFunction()")) {
- return;
- }
isolate->SetFailedAccessCheckCallback(callback);
}
@@ -6624,8 +6309,7 @@ intptr_t Isolate::AdjustAmountOfExternalAllocatedMemory(
intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() ||
- IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
+ if (isolate == NULL || !isolate->IsInitialized()) {
return 0;
}
Isolate* isolate_ext = reinterpret_cast<Isolate*>(isolate);
@@ -6647,9 +6331,15 @@ CpuProfiler* Isolate::GetCpuProfiler() {
}
+bool Isolate::InContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->context() != NULL;
+}
+
+
v8::Local<v8::Context> Isolate::GetCurrentContext() {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
- i::Context* context = internal_isolate->context();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Context* context = isolate->context();
if (context == NULL) return Local<Context>();
i::Context* native_context = context->global_object()->native_context();
if (native_context == NULL) return Local<Context>();
@@ -6657,73 +6347,119 @@ v8::Local<v8::Context> Isolate::GetCurrentContext() {
}
-void Isolate::SetObjectGroupId(const Persistent<Value>& object,
- UniqueId id) {
+v8::Local<v8::Context> Isolate::GetCallingContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::Object> calling = isolate->GetCallingNativeContext();
+ if (calling.is_null()) return Local<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>::cast(calling));
+}
+
+
+v8::Local<v8::Context> Isolate::GetEnteredContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::Object> last =
+ isolate->handle_scope_implementer()->LastEnteredContext();
+ if (last.is_null()) return Local<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>::cast(last));
+}
+
+
+v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ ENTER_V8(isolate);
+ // If we're passed an empty handle, we throw an undefined exception
+ // to deal more gracefully with out of memory situations.
+ if (value.IsEmpty()) {
+ isolate->ScheduleThrow(isolate->heap()->undefined_value());
+ } else {
+ isolate->ScheduleThrow(*Utils::OpenHandle(*value));
+ }
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+}
+
+
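ThrowException is now available directly on the isolate, and an empty handle degrades to throwing undefined rather than failing. A hedged usage sketch, with the TypeError helper taken from the existing public v8::Exception API:

    void ReportBadArgument(v8::Isolate* isolate) {
      // Schedules the exception on this isolate; the undefined return
      // value is conventionally handed back to the calling callback.
      isolate->ThrowException(
          v8::Exception::TypeError(v8::String::New("bad argument")));
    }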
+void Isolate::SetObjectGroupId(internal::Object** object, UniqueId id) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
internal_isolate->global_handles()->SetObjectGroupId(
- Utils::OpenPersistent(object).location(),
+ v8::internal::Handle<v8::internal::Object>(object).location(),
id);
}
-void Isolate::SetReferenceFromGroup(UniqueId id,
- const Persistent<Value>& object) {
+void Isolate::SetReferenceFromGroup(UniqueId id, internal::Object** object) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
internal_isolate->global_handles()->SetReferenceFromGroup(
id,
- Utils::OpenPersistent(object).location());
+ v8::internal::Handle<v8::internal::Object>(object).location());
}
-void Isolate::SetReference(const Persistent<Object>& parent,
- const Persistent<Value>& child) {
+void Isolate::SetReference(internal::Object** parent,
+ internal::Object** child) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
- i::Object** parent_location = Utils::OpenPersistent(parent).location();
+ i::Object** parent_location =
+ v8::internal::Handle<v8::internal::Object>(parent).location();
internal_isolate->global_handles()->SetReference(
reinterpret_cast<i::HeapObject**>(parent_location),
- Utils::OpenPersistent(child).location());
+ v8::internal::Handle<v8::internal::Object>(child).location());
}
-void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
- isolate->heap()->SetGlobalGCPrologueCallback(callback);
+void Isolate::AddGCPrologueCallback(GCPrologueCallback callback,
+ GCType gc_type) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->AddGCPrologueCallback(callback, gc_type);
}
-void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCEpilogueCallback()")) return;
- isolate->heap()->SetGlobalGCEpilogueCallback(callback);
+void Isolate::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->RemoveGCPrologueCallback(callback);
+}
+
+
+void Isolate::AddGCEpilogueCallback(GCEpilogueCallback callback,
+ GCType gc_type) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
+}
+
+
+void Isolate::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->RemoveGCEpilogueCallback(callback);
}
void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddGCPrologueCallback()")) return;
- isolate->heap()->AddGCPrologueCallback(callback, gc_type);
+ isolate->heap()->AddGCPrologueCallback(
+ reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback),
+ gc_type,
+ false);
}
void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveGCPrologueCallback()")) return;
- isolate->heap()->RemoveGCPrologueCallback(callback);
+ isolate->heap()->RemoveGCPrologueCallback(
+ reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback));
}
void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddGCEpilogueCallback()")) return;
- isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
+ isolate->heap()->AddGCEpilogueCallback(
+ reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback),
+ gc_type,
+ false);
}
void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveGCEpilogueCallback()")) return;
- isolate->heap()->RemoveGCEpilogueCallback(callback);
+ isolate->heap()->RemoveGCEpilogueCallback(
+ reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback));
}
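GC callback registration moves onto the isolate, and the static V8::Add/Remove variants above now forward with a cast. A minimal sketch of the per-isolate form, assuming Isolate::GCPrologueCallback is declared in v8.h as void(Isolate*, GCType, GCCallbackFlags):

    #include <cstdio>

    static void OnGCPrologue(v8::Isolate* isolate,
                             v8::GCType type,
                             v8::GCCallbackFlags flags) {
      // Runs at the start of each matching GC cycle on this isolate only.
      std::printf("GC prologue, type=%d\n", static_cast<int>(type));
    }

    void InstallGCHooks(v8::Isolate* isolate) {
      // kGCTypeAll selects both scavenges and mark-sweep collections.
      isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeAll);
    }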
@@ -6731,7 +6467,6 @@ void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
AllocationAction action) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddMemoryAllocationCallback()")) return;
isolate->memory_allocator()->AddMemoryAllocationCallback(
callback, space, action);
}
@@ -6739,7 +6474,6 @@ void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveMemoryAllocationCallback()")) return;
isolate->memory_allocator()->RemoveMemoryAllocationCallback(
callback);
}
@@ -6747,17 +6481,11 @@ void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
if (callback == NULL) return;
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddLeaveScriptCallback()")) return;
i::V8::AddCallCompletedCallback(callback);
}
void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveLeaveScriptCallback()")) return;
i::V8::RemoveCallCompletedCallback(callback);
}
@@ -6843,7 +6571,6 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::Utf8Value::Utf8Value()")) return;
if (obj.IsEmpty()) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -6865,7 +6592,6 @@ String::Utf8Value::~Utf8Value() {
String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::AsciiValue::AsciiValue()")) return;
if (obj.IsEmpty()) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -6887,7 +6613,6 @@ String::AsciiValue::~AsciiValue() {
String::Value::Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::Value::Value()")) return;
if (obj.IsEmpty()) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -7062,6 +6787,16 @@ void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
}
+void Debug::SendCommand(Isolate* isolate,
+ const uint16_t* command,
+ int length,
+ ClientData* client_data) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->debugger()->ProcessCommand(
+ i::Vector<const uint16_t>(command, length), client_data);
+}
+
+
void Debug::SendCommand(const uint16_t* command, int length,
ClientData* client_data,
Isolate* isolate) {
@@ -7187,7 +6922,6 @@ void Debug::SetLiveEditEnabled(bool enable, Isolate* isolate) {
Handle<String> CpuProfileNode::GetFunctionName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
const i::CodeEntry* entry = node->entry();
if (!entry->has_name_prefix()) {
@@ -7210,7 +6944,6 @@ int CpuProfileNode::GetScriptId() const {
Handle<String> CpuProfileNode::GetScriptResourceName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
node->entry()->resource_name()));
@@ -7222,16 +6955,15 @@ int CpuProfileNode::GetLineNumber() const {
}
-const char* CpuProfileNode::GetBailoutReason() const {
- const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
- return node->entry()->bailout_reason();
+int CpuProfileNode::GetColumnNumber() const {
+ return reinterpret_cast<const i::ProfileNode*>(this)->
+ entry()->column_number();
}
-double CpuProfileNode::GetSelfSamplesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
- return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
+const char* CpuProfileNode::GetBailoutReason() const {
+ const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ return node->entry()->bailout_reason();
}
@@ -7264,7 +6996,6 @@ const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
void CpuProfile::Delete() {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::Delete");
i::CpuProfiler* profiler = isolate->cpu_profiler();
ASSERT(profiler != NULL);
profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
@@ -7282,7 +7013,6 @@ unsigned CpuProfile::GetUid() const {
Handle<String> CpuProfile::GetTitle() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
profile->title()));
@@ -7374,15 +7104,12 @@ static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType");
return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
}
Handle<Value> HeapGraphEdge::GetName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetName");
i::HeapGraphEdge* edge = ToInternal(this);
switch (edge->type()) {
case i::HeapGraphEdge::kContextVariable:
@@ -7398,21 +7125,17 @@ Handle<Value> HeapGraphEdge::GetName() const {
isolate->factory()->NewNumberFromInt(edge->index()));
default: UNREACHABLE();
}
- return v8::Undefined();
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
const i::HeapEntry* from = ToInternal(this)->from();
return reinterpret_cast<const HeapGraphNode*>(from);
}
const HeapGraphNode* HeapGraphEdge::GetToNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetToNode");
const i::HeapEntry* to = ToInternal(this)->to();
return reinterpret_cast<const HeapGraphNode*>(to);
}
@@ -7425,44 +7148,33 @@ static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
HeapGraphNode::Type HeapGraphNode::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetType");
return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
}
Handle<String> HeapGraphNode::GetName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(ToInternal(this)->name()));
}
SnapshotObjectId HeapGraphNode::GetId() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
return ToInternal(this)->id();
}
int HeapGraphNode::GetSelfSize() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize");
return ToInternal(this)->self_size();
}
int HeapGraphNode::GetChildrenCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
return ToInternal(this)->children().length();
}
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
ToInternal(this)->children()[index]);
}
@@ -7470,7 +7182,6 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");
i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
return !object.is_null() ?
ToApiHandle<Value>(object) :
@@ -7486,7 +7197,6 @@ static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
void HeapSnapshot::Delete() {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::Delete");
if (isolate->heap_profiler()->GetSnapshotsCount() > 1) {
ToInternal(this)->Delete();
} else {
@@ -7497,61 +7207,46 @@ void HeapSnapshot::Delete() {
unsigned HeapSnapshot::GetUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid");
return ToInternal(this)->uid();
}
Handle<String> HeapSnapshot::GetTitle() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(ToInternal(this)->title()));
}
const HeapGraphNode* HeapSnapshot::GetRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetHead");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
}
const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
return reinterpret_cast<const HeapGraphNode*>(
ToInternal(this)->GetEntryById(id));
}
int HeapSnapshot::GetNodesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
return ToInternal(this)->entries().length();
}
const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
return reinterpret_cast<const HeapGraphNode*>(
&ToInternal(this)->entries().at(index));
}
SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetMaxSnapshotJSObjectId");
return ToInternal(this)->max_snapshot_js_object_id();
}
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::Serialize");
ApiCheck(format == kJSON,
"v8::HeapSnapshot::Serialize",
"Unknown serialization format");
@@ -7632,6 +7327,16 @@ void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
}
+void HeapProfiler::StartRecordingHeapAllocations() {
+ reinterpret_cast<i::HeapProfiler*>(this)->StartHeapAllocationsRecording();
+}
+
+
+void HeapProfiler::StopRecordingHeapAllocations() {
+ reinterpret_cast<i::HeapProfiler*>(this)->StopHeapAllocationsRecording();
+}
+
+
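The two HeapProfiler entry points added above toggle heap allocation recording in the profiler. A hypothetical driver, with GetHeapProfiler from the existing Isolate API:

    void ProfileAllocations(v8::Isolate* isolate) {
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
      profiler->StartRecordingHeapAllocations();
      // ... run the workload whose allocations should be attributed ...
      profiler->StopRecordingHeapAllocations();
    }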
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
@@ -7762,9 +7467,11 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
v->VisitPointers(blocks()->last(), handle_scope_data_.next);
}
- if (!saved_contexts_.is_empty()) {
- Object** start = reinterpret_cast<Object**>(&saved_contexts_.first());
- v->VisitPointers(start, start + saved_contexts_.length());
+ List<Context*>* context_lists[2] = { &saved_contexts_, &entered_contexts_};
+ for (unsigned i = 0; i < ARRAY_SIZE(context_lists); i++) {
+ if (context_lists[i]->is_empty()) continue;
+ Object** start = reinterpret_cast<Object**>(&context_lists[i]->first());
+ v->VisitPointers(start, start + context_lists[i]->length());
}
}
@@ -7823,7 +7530,7 @@ DeferredHandles::~DeferredHandles() {
isolate_->UnlinkDeferredHandles(this);
for (int i = 0; i < blocks_.length(); i++) {
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]);
#endif
isolate_->handle_scope_implementer()->ReturnBlock(blocks_[i]);
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 51bc4942b2..9197bafbc5 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -542,12 +542,12 @@ class HandleScopeImplementer {
inline void DecrementCallDepth() {call_depth_--;}
inline bool CallDepthIsZero() { return call_depth_ == 0; }
- inline void EnterContext(Handle<Object> context);
- inline bool LeaveLastContext();
+ inline void EnterContext(Handle<Context> context);
+ inline bool LeaveContext(Handle<Context> context);
// Returns the last entered context or an empty handle if no
// contexts have been entered.
- inline Handle<Object> LastEnteredContext();
+ inline Handle<Context> LastEnteredContext();
inline void SaveContext(Context* context);
inline Context* RestoreContext();
@@ -592,7 +592,7 @@ class HandleScopeImplementer {
Isolate* isolate_;
List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
- List<Handle<Object> > entered_contexts_;
+ List<Context*> entered_contexts_;
// Used as a stack to keep track of saved contexts.
List<Context*> saved_contexts_;
Object** spare_;
@@ -630,21 +630,23 @@ bool HandleScopeImplementer::HasSavedContexts() {
}
-void HandleScopeImplementer::EnterContext(Handle<Object> context) {
- entered_contexts_.Add(context);
+void HandleScopeImplementer::EnterContext(Handle<Context> context) {
+ entered_contexts_.Add(*context);
}
-bool HandleScopeImplementer::LeaveLastContext() {
+bool HandleScopeImplementer::LeaveContext(Handle<Context> context) {
if (entered_contexts_.is_empty()) return false;
+ // TODO(dcarney): figure out what's wrong here
+ // if (entered_contexts_.last() != *context) return false;
entered_contexts_.RemoveLast();
return true;
}
-Handle<Object> HandleScopeImplementer::LastEnteredContext() {
- if (entered_contexts_.is_empty()) return Handle<Object>::null();
- return entered_contexts_.last();
+Handle<Context> HandleScopeImplementer::LastEnteredContext() {
+ if (entered_contexts_.is_empty()) return Handle<Context>::null();
+ return Handle<Context>(entered_contexts_.last());
}
@@ -665,7 +667,7 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
#ifdef DEBUG
// SealHandleScope may make the prev_limit to point inside the block.
if (block_start <= prev_limit && prev_limit <= block_limit) {
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
internal::HandleScope::ZapRange(prev_limit, block_limit);
#endif
break;
@@ -675,7 +677,7 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
#endif
blocks_.RemoveLast();
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
internal::HandleScope::ZapRange(block_start, block_limit);
#endif
if (spare_ != NULL) {
diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js
index 5fb36c09e7..6431901bf2 100644
--- a/deps/v8/src/apinatives.js
+++ b/deps/v8/src/apinatives.js
@@ -71,7 +71,6 @@ function InstantiateFunction(data, name) {
(serialNumber in cache) && (cache[serialNumber] != kUninitialized);
if (!isFunctionCached) {
try {
- cache[serialNumber] = null;
var fun = %CreateApiFunction(data);
if (name) %FunctionSetName(fun, name);
var flags = %GetTemplateField(data, kApiFlagOffset);
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
index 287805717e..3a4d733152 100644
--- a/deps/v8/src/arguments.cc
+++ b/deps/v8/src/arguments.cc
@@ -38,7 +38,7 @@ template<typename T>
template<typename V>
v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
// Check the ReturnValue.
- Object** handle = &this->end()[kReturnValueOffset];
+ Object** handle = &this->begin()[kReturnValueOffset];
// Nothing was set; return an empty handle, as per previous behaviour.
if ((*handle)->IsTheHole()) return v8::Handle<V>();
return Utils::Convert<Object, V>(Handle<Object>(handle));
@@ -49,7 +49,7 @@ v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- FunctionCallbackInfo<v8::Value> info(end(),
+ FunctionCallbackInfo<v8::Value> info(begin(),
argv_,
argc_,
is_construct_call_);
@@ -63,7 +63,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
f(info); \
return GetReturnValue<ReturnValue>(isolate); \
}
@@ -75,7 +75,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
@@ -88,7 +88,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, arg2, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
@@ -101,7 +101,7 @@ void PropertyCallbackArguments::Call(Function f, \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, arg2, info); \
}
@@ -118,4 +118,3 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
} } // namespace v8::internal
-
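
These arguments.cc hunks, together with the arguments.h hunks below, flip CustomArguments from addressing slots backwards off end() (values_ + kArrayLength - 1) to addressing them forwards off begin() (values_). This only works if the k*Index constants were renumbered to count from the front of the array, which happens in include/v8.h and is not visible in this diff; assuming that, the two schemes name the same physical slots:

    struct Object;

    // Illustrative 4-slot argument block addressed both ways.
    struct ArgBlock {
      static const int kArrayLength = 4;
      Object* values_[kArrayLength];

      Object** end()   { return values_ + kArrayLength - 1; }  // old accessor
      Object** begin() { return values_; }                     // new accessor
    };

    // For any slot, begin()[i] aliases end()[i - (kArrayLength - 1)]:
    // the patch rewrites every negative-from-end index as a
    // nonnegative-from-begin index without moving any data.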
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index c1db98b53d..92e57401f2 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -137,7 +137,7 @@ class CustomArgumentsBase : public Relocatable {
v->VisitPointers(values_, values_ + kArrayLength);
}
protected:
- inline Object** end() { return values_ + kArrayLength - 1; }
+ inline Object** begin() { return values_; }
explicit inline CustomArgumentsBase(Isolate* isolate)
: Relocatable(isolate) {}
Object* values_[kArrayLength];
@@ -151,7 +151,7 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
typedef CustomArgumentsBase<T::kArgsLength> Super;
~CustomArguments() {
- this->end()[kReturnValueOffset] =
+ this->begin()[kReturnValueOffset] =
reinterpret_cast<Object*>(kHandleZapValue);
}
@@ -162,7 +162,7 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
v8::Handle<V> GetReturnValue(Isolate* isolate);
inline Isolate* isolate() {
- return reinterpret_cast<Isolate*>(this->end()[T::kIsolateIndex]);
+ return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
}
};
@@ -185,7 +185,7 @@ class PropertyCallbackArguments
Object* self,
JSObject* holder)
: Super(isolate) {
- Object** values = this->end();
+ Object** values = this->begin();
values[T::kThisIndex] = self;
values[T::kHolderIndex] = holder;
values[T::kDataIndex] = data;
@@ -237,6 +237,13 @@ class FunctionCallbackArguments
typedef FunctionCallbackInfo<Value> T;
typedef CustomArguments<T> Super;
static const int kArgsLength = T::kArgsLength;
+ static const int kHolderIndex = T::kHolderIndex;
+ static const int kDataIndex = T::kDataIndex;
+ static const int kReturnValueDefaultValueIndex =
+ T::kReturnValueDefaultValueIndex;
+ static const int kIsolateIndex = T::kIsolateIndex;
+ static const int kCalleeIndex = T::kCalleeIndex;
+ static const int kContextSaveIndex = T::kContextSaveIndex;
FunctionCallbackArguments(internal::Isolate* isolate,
internal::Object* data,
@@ -249,10 +256,11 @@ class FunctionCallbackArguments
argv_(argv),
argc_(argc),
is_construct_call_(is_construct_call) {
- Object** values = end();
+ Object** values = begin();
values[T::kDataIndex] = data;
values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
+ values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as the default value.
// It cannot escape into JS as it's removed in Call below.
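
The constructor hunk above seeds the new kContextSaveIndex slot with the hole, mirroring how the return-value slot is defaulted. A hedged sketch of the implicit-args initialization, using the index names hoisted into FunctionCallbackArguments earlier in this diff (the ordering below is a placeholder; the authoritative values live in include/v8.h):

    struct Object;

    // Placeholder ordering, for illustration only.
    enum ImplicitArgIndex {
      kHolderIndex,
      kIsolateIndex,
      kReturnValueDefaultValueIndex,
      kDataIndex,
      kCalleeIndex,
      kContextSaveIndex,
      kImplicitArgsLength
    };

    void InitImplicitArgs(Object** values, Object* data, Object* callee,
                          Object* holder, Object* the_hole,
                          Object* isolate_as_object) {
      values[kDataIndex] = data;
      values[kCalleeIndex] = callee;
      values[kHolderIndex] = holder;
      values[kContextSaveIndex] = the_hole;       // new slot in this patch
      values[kIsolateIndex] = isolate_as_object;  // reinterpret_cast'd Isolate*
      values[kReturnValueDefaultValueIndex] = the_hole;
    }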
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index a1d1e1b567..e3b39f407c 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -208,6 +208,13 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
static const int kNoCodeAgeSequenceLength = 3;
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on Arm.
+ return Handle<Object>();
+}
+
+
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index bd8b0613eb..05b25ae2d7 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -50,6 +50,7 @@ bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cross_compile_ = 0;
unsigned CpuFeatures::cache_line_size_ = 64;
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 866b1c9024..8caa64df34 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -64,23 +64,41 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
- return (supported_ & (1u << f)) != 0;
+ return Check(f, supported_);
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- return (found_by_runtime_probing_only_ &
- (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
static bool IsSafeForSnapshot(CpuFeature f) {
- return (IsSupported(f) &&
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
static unsigned cache_line_size() { return cache_line_size_; }
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ unsigned mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
+
private:
+ static bool Check(CpuFeature f, unsigned set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static unsigned flag2set(CpuFeature f) {
+ return 1u << f;
+ }
+
#ifdef DEBUG
static bool initialized_;
#endif
@@ -88,7 +106,10 @@ class CpuFeatures : public AllStatic {
static unsigned found_by_runtime_probing_only_;
static unsigned cache_line_size_;
+ static unsigned cross_compile_;
+
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
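
Every CpuFeatures query above now reduces to a bit test against one of three masks: supported_, found_by_runtime_probing_only_, and the new cross_compile_, which lists features the embedder forces on for a cross-compilation target. A self-contained restatement of the scheme (the enum values are illustrative, not V8's):

    #include <cstdio>

    enum CpuFeature { VFP3 = 0, NEON = 1, SUDIV = 2 };  // illustrative values

    static unsigned flag2set(CpuFeature f) { return 1u << f; }

    static bool Check(CpuFeature f, unsigned set) {
      return (set & flag2set(f)) != 0;
    }

    int main() {
      unsigned supported = flag2set(VFP3) | flag2set(SUDIV);
      unsigned cross_compile = flag2set(VFP3);  // forced for the target CPU

      // IsSafeForSnapshot-style logic: a cross-compile-forced feature is
      // always safe; otherwise it must be supported (serializer and
      // runtime-probing conditions elided for brevity).
      CpuFeature f = SUDIV;
      bool safe = Check(f, cross_compile) || Check(f, supported);
      std::printf("SUDIV safe: %d\n", safe ? 1 : 0);
      return 0;
    }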
@@ -114,21 +135,47 @@ class CpuFeatures : public AllStatic {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
+// These constants are used in several locations, including static initializers
+const int kRegister_no_reg_Code = -1;
+const int kRegister_r0_Code = 0;
+const int kRegister_r1_Code = 1;
+const int kRegister_r2_Code = 2;
+const int kRegister_r3_Code = 3;
+const int kRegister_r4_Code = 4;
+const int kRegister_r5_Code = 5;
+const int kRegister_r6_Code = 6;
+const int kRegister_r7_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_fp_Code = 11;
+const int kRegister_ip_Code = 12;
+const int kRegister_sp_Code = 13;
+const int kRegister_lr_Code = 14;
+const int kRegister_pc_Code = 15;
+
// Core register
struct Register {
static const int kNumRegisters = 16;
- static const int kMaxNumAllocatableRegisters = 8;
+ static const int kMaxNumAllocatableRegisters =
+ FLAG_enable_ool_constant_pool ? 8 : 9;
static const int kSizeInBytes = 4;
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
+ if (FLAG_enable_ool_constant_pool && (reg.code() >= kRegister_r8_Code)) {
+ return reg.code() - 1;
+ }
ASSERT(reg.code() < kMaxNumAllocatableRegisters);
return reg.code();
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ if (FLAG_enable_ool_constant_pool && (index >= 7)) {
+ return from_code(index + 1);
+ }
return from_code(index);
}
@@ -143,7 +190,11 @@ struct Register {
"r5",
"r6",
"r7",
+ "r8",
};
+ if (FLAG_enable_ool_constant_pool && (index >= 7)) {
+ return names[index + 1];
+ }
return names[index];
}
@@ -172,25 +223,6 @@ struct Register {
int code_;
};
-// These constants are used in several locations, including static initializers
-const int kRegister_no_reg_Code = -1;
-const int kRegister_r0_Code = 0;
-const int kRegister_r1_Code = 1;
-const int kRegister_r2_Code = 2;
-const int kRegister_r3_Code = 3;
-const int kRegister_r4_Code = 4;
-const int kRegister_r5_Code = 5;
-const int kRegister_r6_Code = 6;
-const int kRegister_r7_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_fp_Code = 11;
-const int kRegister_ip_Code = 12;
-const int kRegister_sp_Code = 13;
-const int kRegister_lr_Code = 14;
-const int kRegister_pc_Code = 15;
-
const Register no_reg = { kRegister_no_reg_Code };
const Register r0 = { kRegister_r0_Code };
@@ -200,6 +232,7 @@ const Register r3 = { kRegister_r3_Code };
const Register r4 = { kRegister_r4_Code };
const Register r5 = { kRegister_r5_Code };
const Register r6 = { kRegister_r6_Code };
+// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
const Register r7 = { kRegister_r7_Code };
// Used as context register.
const Register r8 = { kRegister_r8_Code };
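
When FLAG_enable_ool_constant_pool is set, r7 is reserved as the constant pool pointer and drops out of the allocatable set, while r8 becomes allocatable; ToAllocationIndex and FromAllocationIndex above compensate by shifting codes above r7 down one index. A sketch of that round trip with the flag modeled as a compile-time constant:

    #include <cassert>

    const int kRegister_r7_Code = 7;
    const int kRegister_r8_Code = 8;

    // Stand-in for FLAG_enable_ool_constant_pool.
    const bool kOolConstantPool = true;

    int ToAllocationIndex(int code) {
      // Any allocatable code above r7 shifts down by one, because r7
      // is skipped.
      if (kOolConstantPool && code >= kRegister_r8_Code) return code - 1;
      return code;  // r0..r6 map to themselves
    }

    int FromAllocationIndex(int index) {
      if (kOolConstantPool && index >= 7) return index + 1;  // skip over r7
      return index;
    }

    int main() {
      for (int code = 0; code <= kRegister_r8_Code; code++) {
        if (code == kRegister_r7_Code) continue;  // reserved, never allocated
        assert(FromAllocationIndex(ToAllocationIndex(code)) == code);
      }
      return 0;
    }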
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index f60e1f8671..60f5290030 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -193,14 +193,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Register argument = r2;
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- r0, // Input.
- argument, // Result.
- r3, // Scratch.
- r4, // Scratch.
- r5, // Scratch.
- &not_cached);
+ __ LookupNumberStringCache(r0, // Input.
+ argument, // Result.
+ r3, // Scratch.
+ r4, // Scratch.
+ r5, // Scratch.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
__ bind(&argument_is_string);
@@ -447,9 +445,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
@@ -457,14 +454,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmp(r0, r6);
+ __ add(ip, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ cmp(r0, ip);
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
}
- __ InitializeFieldsWithFiller(r5, r0, r7);
+ __ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
- __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
+ __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
}
- __ InitializeFieldsWithFiller(r5, r6, r7);
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -529,16 +528,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, kUndefinedValueNotLoaded);
- }
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&entry);
__ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+ __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(r2, r6);
__ b(lt, &loop);
@@ -702,7 +695,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r2: receiver
// r3: argc
// r4: argv
- // r5-r7, cp may be clobbered
+ // r5-r6, r7 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
@@ -742,7 +735,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
+ if (!FLAG_enable_ool_constant_pool) {
+ __ mov(r7, Operand(r4));
+ }
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
}
@@ -807,12 +802,13 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// The following registers must be saved and restored when calling through to
// the runtime:
// r0 - contains return address (beginning of patch sequence)
- // r1 - function object
+ // r1 - isolate
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(1, 0, r1);
+ __ PrepareCallCFunction(1, 0, r2);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ mov(pc, r0);
}
@@ -830,6 +826,39 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // r0 - contains return address (beginning of patch sequence)
+ // r1 - isolate
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ PrepareCallCFunction(1, 0, r2);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
+ masm->isolate()), 2);
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+
+ // Jump to point after the code-age stub.
+ __ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
+ __ mov(pc, r0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -895,21 +924,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ Ret();
-}
-
-
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -956,6 +970,24 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as an indicator that recompilation might be done.
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ Ret();
+}
+
+
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index cd1809fb2a..9330eb1411 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -59,6 +59,17 @@ void ToNumberStub::InitializeInterfaceDescriptor(
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -77,7 +88,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
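
Both descriptor hunks above follow the same convention used by V8's Hydrogen code stubs: the descriptor names the register parameters and wires up a runtime entry to fall back to when the stub deoptimizes. A hedged sketch of the shape of such an initializer; the struct shows only the fields exercised in this diff, and the stub itself is hypothetical:

    struct Register { int code; };
    typedef void* Address;

    // Illustrative subset of CodeStubInterfaceDescriptor.
    struct CodeStubInterfaceDescriptor {
      int register_param_count_;
      Register* register_params_;
      Address deoptimization_handler_;
    };

    // Hypothetical stub taking one tagged argument in r0 and bailing out
    // to a runtime function when the fast path cannot handle the input.
    void InitializeMyStubDescriptor(CodeStubInterfaceDescriptor* descriptor,
                                    Address runtime_entry) {
      static Register registers[] = { { 0 } };  // r0
      descriptor->register_param_count_ = 1;
      descriptor->register_params_ = registers;
      descriptor->deoptimization_handler_ = runtime_entry;
    }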
@@ -158,6 +169,18 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
}
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
static void InitializeArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
@@ -170,7 +193,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &r0;
+ descriptor->stack_parameter_count_ = r0;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
@@ -192,7 +215,7 @@ static void InitializeInternalArrayConstructorDescriptor(
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &r0;
+ descriptor->stack_parameter_count_ = r0;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
@@ -825,8 +848,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Convert lhs to a double in d7.
__ SmiToDouble(d7, lhs);
// Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
+ __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
// We now have both loaded as doubles but we can skip the lhs nan check
// since it's a smi.
@@ -851,8 +873,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Rhs is a smi, lhs is a heap number.
// Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
+ __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
// Convert rhs to a double in d6 .
__ SmiToDouble(d6, rhs);
// Fall through to both_loaded_as_doubles.
@@ -920,10 +941,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
+ __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
+ __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(both_loaded_as_doubles);
}
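
The three comparison hunks above replace a sub-then-vldr pair with a single vldr whose immediate folds in the heap-object tag: instead of untagging the pointer into a scratch register first, the load uses the offset HeapNumber::kValueOffset - kHeapObjectTag directly. The equivalent address arithmetic in plain C++ (kHeapObjectTag is 1 for V8's tagged heap pointers; memcpy stands in for the FP load):

    #include <cstdint>
    #include <cstring>

    const uintptr_t kHeapObjectTag = 1;

    // One-load equivalent of: untag the pointer, then load at value_offset.
    double LoadHeapNumberValue(uintptr_t tagged, int value_offset) {
      const char* addr =
          reinterpret_cast<const char*>(tagged + value_offset - kHeapObjectTag);
      double result;
      std::memcpy(&result, addr, sizeof result);  // vldr d, [rn, #off - tag]
      return result;
    }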
@@ -972,108 +991,6 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
- __ sub(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ VFPCompareAndSetFlags(d0, d1);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ and_(scratch, mask, Operand(object, ASR, 1));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch,
- number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ cmp(object, probe);
- __ b(ne, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ ldr(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ ldr(r1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, &runtime);
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
static void ICCompareStub_CheckInputType(MacroAssembler* masm,
Register input,
Register scratch,
@@ -1281,994 +1198,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-// Generates code to call a C function to do a double operation.
-// This code never falls through, but returns with a heap number containing
-// the result in r0.
-// Register heapnumber_result must be a heap number in which the
-// result of the operation will be stored.
-// Requires the following layout on entry:
-// d0: Left value.
-// d1: Right value.
-// If soft float ABI, use also r0, r1, r2, r3.
-static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Assert that heap_number_result is callee-saved.
- // We currently always use r5 to pass it.
- ASSERT(heap_number_result.is(r5));
-
- // Push the current return address before the C call. Return will be
- // through pop(pc) below.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, scratch);
- if (!masm->use_eabi_hardfloat()) {
- __ vmov(r0, r1, d0);
- __ vmov(r2, r3, d1);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number. Double returned in
- // registers r0 and r1 or in d0.
- if (masm->use_eabi_hardfloat()) {
- __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- __ Strd(r0, r1,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- }
- // Place heap_number_result in r0 and return to the pushed return address.
- __ mov(r0, Operand(heap_number_result));
- __ pop(pc);
-}
-
-
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = true; // VFP2 is a base requirement for V8
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ push(r2);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
-
- ASSERT(right.is(r0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op) {
- case Token::ADD:
- __ add(right, left, Operand(right), SetCC); // Add optimistically.
- __ Ret(vc);
- __ sub(right, right, Operand(left)); // Revert optimistic add.
- break;
- case Token::SUB:
- __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
- __ Ret(vc);
- __ sub(right, left, Operand(right)); // Revert optimistic subtract.
- break;
- case Token::MUL:
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(ip, right);
- // Do multiplication
- // scratch1 = lower 32 bits of ip * left.
- // scratch2 = higher 32 bits of ip * left.
- __ smull(scratch1, scratch2, left, ip);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &not_smi_result);
- // Go slow on zero result to handle -0.
- __ cmp(scratch1, Operand::Zero());
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ Ret(ne);
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return smi 0 if the non-zero one was positive.
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- break;
- case Token::DIV: {
- Label div_with_sdiv;
-
- // Check for 0 divisor.
- __ cmp(right, Operand::Zero());
- __ b(eq, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ sub(scratch1, right, Operand(1));
- __ tst(scratch1, right);
- if (CpuFeatures::IsSupported(SUDIV)) {
- __ b(ne, &div_with_sdiv);
- // Check for no remainder.
- __ tst(left, scratch1);
- __ b(ne, &not_smi_result);
- // Check for positive left hand side.
- __ cmp(left, Operand::Zero());
- __ b(mi, &div_with_sdiv);
- } else {
- __ b(ne, &not_smi_result);
- // Check for positive and no remainder.
- __ orr(scratch2, scratch1, Operand(0x80000000u));
- __ tst(left, scratch2);
- __ b(ne, &not_smi_result);
- }
-
- // Perform division by shifting.
- __ clz(scratch1, scratch1);
- __ rsb(scratch1, scratch1, Operand(31));
- __ mov(right, Operand(left, LSR, scratch1));
- __ Ret();
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatureScope scope(masm, SUDIV);
- Label result_not_zero;
-
- __ bind(&div_with_sdiv);
- // Do division.
- __ sdiv(scratch1, left, right);
- // Check that the remainder is zero.
- __ mls(scratch2, scratch1, right, left);
- __ cmp(scratch2, Operand::Zero());
- __ b(ne, &not_smi_result);
- // Check for negative zero result.
- __ cmp(scratch1, Operand::Zero());
- __ b(ne, &result_not_zero);
- __ cmp(right, Operand::Zero());
- __ b(lt, &not_smi_result);
- __ bind(&result_not_zero);
- // Check for the corner case of dividing the most negative smi by -1.
- __ cmp(scratch1, Operand(0x40000000));
- __ b(eq, &not_smi_result);
- // Tag and return the result.
- __ SmiTag(right, scratch1);
- __ Ret();
- }
- break;
- }
- case Token::MOD: {
- Label modulo_with_sdiv;
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- // Check for x % 0.
- __ cmp(right, Operand::Zero());
- __ b(eq, &not_smi_result);
-
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u));
- __ b(ne, &modulo_with_sdiv);
-
- // Check for power of two on the right hand side.
- __ sub(scratch1, right, Operand(1));
- __ tst(scratch1, right);
- __ b(ne, &modulo_with_sdiv);
- } else {
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u));
- __ b(ne, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
- }
-
- // Perform modulus by masking (scratch1 contains right - 1).
- __ and_(right, left, Operand(scratch1));
- __ Ret();
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatureScope scope(masm, SUDIV);
- __ bind(&modulo_with_sdiv);
- __ mov(scratch2, right);
- // Perform modulus with sdiv and mls.
- __ sdiv(scratch1, left, right);
- __ mls(right, scratch1, right, left);
- // Return if the result is not 0.
- __ cmp(right, Operand::Zero());
- __ Ret(ne);
- // The result is 0, check for -0 case.
- __ cmp(left, Operand::Zero());
- __ Ret(pl);
- // This is a -0 case, restore the value of right.
- __ mov(right, scratch2);
- // We fall through here to not_smi_result to produce -0.
- }
- break;
- }
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- __ Ret();
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- // Smi tag result.
- __ bic(right, right, Operand(kSmiTagMask));
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &not_smi_result);
- // Smi tag result.
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ TrySmiTag(right, scratch1, &not_smi_result);
- __ Ret();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r6;
- Register scratch2 = r7;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
-
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Allocate new heap number for result.
- Register result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
- // Load left and right operands into d0 and d1.
- if (smi_operands) {
- __ SmiToDouble(d1, right);
- __ SmiToDouble(d0, left);
- } else {
- // Load right operand into d1.
- if (right_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- right, d1, heap_number_map, scratch1, d8, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(right, d1, heap_number_map, scratch1, fail);
- }
- // Load left operand into d0.
- if (left_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- left, d0, heap_number_map, scratch1, d8, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(
- left, d0, heap_number_map, scratch1, fail);
- }
- }
-
- // Calculate the result.
- if (op != Token::MOD) {
- // Using VFP registers:
- // d0: Left value
- // d1: Right value
- switch (op) {
- case Token::ADD:
- __ vadd(d5, d0, d1);
- break;
- case Token::SUB:
- __ vsub(d5, d0, d1);
- break;
- case Token::MUL:
- __ vmul(d5, d0, d1);
- break;
- case Token::DIV:
- __ vdiv(d5, d0, d1);
- break;
- default:
- UNREACHABLE();
- }
-
- __ sub(r0, result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op, result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(r3, left);
- __ SmiUntag(r2, right);
- } else {
- // Convert operands to 32-bit integers. Right in r2 and left in r3.
- __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
- __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
- }
-
- Label result_not_a_smi;
- switch (op) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int so we go to slow case if we
- // hit this case.
- __ b(mi, &result_not_a_smi);
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check that the *signed* result fits in a smi.
- __ TrySmiTag(r0, r2, &result_not_a_smi);
- __ Ret();
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = r5;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
- }
-
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
-
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
- // mentioned above SHR needs to always produce a positive result.
- __ vmov(s0, r2);
- if (op == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// Generate the smi code. If the operation on smis is successful, this return
-// is generated. If the result is not a smi and heap number allocation is not
-// requested, the code falls through. If number allocation is requested but a
-// heap number cannot be allocated, the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
- Label not_smis;
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- __ JumpIfNotSmi(scratch1, &not_smis);
-
- // If the smi-smi operation results in a smi, a return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
-
- // If heap number results are possible generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
- }
- __ bind(&not_smis);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
- __ b(ne, &right_arg_changed);
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- LowDwVfpRegister double_scratch = d0;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both labels are currently bound to the same position. We use two
- // different labels to differentiate the causes leading to type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ orr(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers r0 and r1 (right
- // and left) are preserved for the runtime call.
- __ LoadNumberAsInt32Double(
- right, d1, heap_number_map, scratch1, d8, &transition);
- __ LoadNumberAsInt32Double(
- left, d0, heap_number_map, scratch1, d8, &transition);
-
- if (op_ != Token::MOD) {
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d0, d1);
- break;
- case Token::SUB:
- __ vsub(d5, d0, d1);
- break;
- case Token::MUL:
- __ vmul(d5, d0, d1);
- break;
- case Token::DIV:
- __ vdiv(d5, d0, d1);
- break;
- default:
- UNREACHABLE();
- }
-
- if (result_type_ <= BinaryOpIC::INT32) {
- __ TryDoubleToInt32Exact(scratch1, d5, d8);
- // If the ne condition is set, result does
- // not fit in a 32-bit integer.
- __ b(ne, &transition);
- // Try to tag the result as a Smi, return heap number on overflow.
- __ SmiTag(scratch1, SetCC);
- __ b(vs, &return_heap_number);
- // Check for minus zero, transition in that case (because we need
- // to return a heap number).
- Label not_zero;
- ASSERT(kSmiTag == 0);
- __ b(ne, &not_zero);
- __ VmovHigh(scratch2, d5);
- __ tst(scratch2, Operand(HeapNumber::kSignMask));
- __ b(ne, &transition);
- __ bind(&not_zero);
- __ mov(r0, scratch1);
- __ Ret();
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- // We are using vfp registers so r5 is available.
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- if (encoded_right_arg_.has_value) {
- __ Vmov(d8, fixed_right_arg_value(), scratch1);
- __ VFPCompareAndSetFlags(d1, d8);
- __ b(ne, &transition);
- }
-
- // We preserved r0 and r1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(r5, r4);
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
-
- // Load the left value from the value saved on the stack.
- __ Pop(r1, r0);
-
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ b(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- // Convert operands to 32-bit integers. Right in r2 and left in r3. The
- // registers r0 and r1 (right and left) are preserved for the runtime
- // call.
- __ LoadNumberAsInt32(left, r3, heap_number_map,
- scratch1, d0, d1, &transition);
- __ LoadNumberAsInt32(right, r2, heap_number_map,
- scratch1, d0, d1, &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (r2) is 0.
- // This result cannot be represented as a signed 32-bit integer, try
- // to return a heap number if we can.
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number);
- break;
- case Token::SHL:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi. If not try to return a heap number.
- // (We know the result is an int32).
- __ TrySmiTag(r0, r2, &return_heap_number);
- __ Ret();
-
- __ bind(&return_heap_number);
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_s32(double_scratch, double_scratch.low());
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_u32(double_scratch, double_scratch.low());
- }
-
- // Store the result.
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
- __ b(ne, &check);
- if (Token::IsBitOp(op_)) {
- __ mov(r1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &done);
- if (Token::IsBitOp(op_)) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
-
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = r1;
- Register right = r0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &left_not_string);
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
- // Code below will scratch result if allocation fails. To keep both arguments
- // intact for the runtime call, result cannot be one of these.
- ASSERT(!result.is(r0) && !result.is(r1));
-
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ b(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, Operand(overwritable_operand));
- __ bind(&allocated);
- } else {
- ASSERT(mode == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(r1, r0);
-}
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in d2, double result goes
// into d2.
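
The roughly 990 lines deleted in the hunk above are the hand-written ARM port of BinaryOpStub; the upgrade replaces it with the descriptor-driven stub registered earlier in this file (see the BinaryOpStub::InitializeInterfaceDescriptor hunk, and the BinaryOpStub::GenerateAheadOfTime call added below). For reference, the deleted smi-smi ADD fast path added optimistically and returned only when no overflow occurred; the same logic in portable form (illustrative C++, not V8 API):

    #include <cstdint>

    // Mirrors: add with SetCC, Ret(vc), then revert the add on overflow.
    bool TrySmiAdd(int32_t left, int32_t right, int32_t* result) {
      int64_t sum = static_cast<int64_t>(left) + static_cast<int64_t>(right);
      if (sum != static_cast<int32_t>(sum)) return false;  // overflow: bail
      *result = static_cast<int32_t>(sum);
      return true;
    }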
@@ -2280,7 +1209,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
Label calculate;
Label invalid_cache;
const Register scratch0 = r9;
- const Register scratch1 = r7;
+ Register scratch1 = no_reg; // will be r4
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
@@ -2360,6 +1289,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ cmp(r2, r4);
__ cmp(r3, r5, eq);
__ b(ne, &calculate);
+
+ scratch1 = r4; // Start of scratch1 range.
+
// Cache hit. Load result, cleanup and return.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(
@@ -2502,7 +1434,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const DwVfpRegister double_scratch = d0;
const SwVfpRegister single_scratch = s0;
const Register scratch = r9;
- const Register scratch2 = r7;
+ const Register scratch2 = r4;
Label call_runtime, done, int_exponent;
if (exponent_type_ == ON_STACK) {
@@ -2708,6 +1640,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
}
@@ -2765,9 +1698,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (do_gc) {
// Passing r0.
- __ PrepareCallCFunction(1, 0, r1);
+ __ PrepareCallCFunction(2, 0, r1);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(ExternalReference::perform_gc_function(isolate),
- 1, 0);
+ 2, 0);
}
ExternalReference scope_depth =
@@ -2841,7 +1775,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// sp: stack pointer
// fp: frame pointer
// Callee-saved register r4 still holds argc.
- __ LeaveExitFrame(save_doubles_, r4);
+ __ LeaveExitFrame(save_doubles_, r4, true);
__ mov(pc, lr);
// check if we should retry or throw exception
@@ -3011,14 +1945,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r3: argc
// r4: argv
Isolate* isolate = masm->isolate();
- __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r7, Operand(Smi::FromInt(marker)));
+ __ mov(r8, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
__ mov(r5,
Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
__ ldr(r5, MemOperand(r5));
- __ Push(r8, r7, r6, r5);
+ __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ Push(ip, r8, r6, r5);
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -3064,7 +1998,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Invoke: Link this frame into the handler chain. There's only one
// handler block in this code object, so its index is 0.
__ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available.
+ // Must preserve r0-r4, r5-r6 are available.
__ PushTryHandler(StackHandler::JS_ENTRY, 0);
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bl(&invoke) above, which
@@ -3375,8 +2309,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
receiver = r0;
}
- StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
@@ -3672,31 +2605,36 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ ldr(r9, MemOperand(sp, 0 * kPointerSize));
__ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ sub(r9, r9, Operand(r1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ add(r3, r4, Operand(r6, LSL, 1));
__ add(r3, r3, Operand(kParameterMapHeaderSize));
// r6 = loop variable (tagged)
// r1 = mapping index (tagged)
// r3 = address of backing store (tagged)
- // r4 = address of parameter map (tagged)
- // r5 = temporary scratch (a.o., for address calculation)
- // r7 = the hole value
+ // r4 = address of parameter map (tagged), which is also the address of the
+ // new object + Heap::kArgumentsObjectSize (tagged)
+ // r0 = temporary scratch (a.o., for address calculation)
+ // r5 = the hole value
__ jmp(&parameters_test);
__ bind(&parameters_loop);
__ sub(r6, r6, Operand(Smi::FromInt(1)));
- __ mov(r5, Operand(r6, LSL, 1));
- __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ str(r9, MemOperand(r4, r5));
- __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ str(r7, MemOperand(r3, r5));
+ __ mov(r0, Operand(r6, LSL, 1));
+ __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ str(r9, MemOperand(r4, r0));
+ __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ str(r5, MemOperand(r3, r0));
__ add(r9, r9, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
__ cmp(r6, Operand(Smi::FromInt(0)));
__ b(ne, &parameters_loop);
+ // Restore r0 = new object (tagged)
+ __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
+
__ bind(&skip_parameter_map);
+ // r0 = address of new object (tagged)
// r2 = argument count (tagged)
// r3 = address of backing store (tagged)
// r5 = scratch
@@ -3727,6 +2665,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Ret();
// Do the runtime call to allocate the arguments object.
+ // r0 = address of new object (tagged)
// r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
@@ -3855,7 +2794,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// therefore the contents of these registers are safe to use after the call.
Register subject = r4;
Register regexp_data = r5;
- Register last_match_info_elements = r6;
+ Register last_match_info_elements = no_reg; // will be r6
// Ensure that a RegExp stack is allocated.
Isolate* isolate = masm->isolate();
@@ -3988,19 +2927,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kTwoByteStringTag == 0);
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+ __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
+ __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
// (E) Carry on. String handling is done.
- // r7: irregexp code
+ // r6: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object; otherwise it contains
// a smi (code flushing support).
- __ JumpIfSmi(r7, &runtime);
+ __ JumpIfSmi(r6, &runtime);
// r1: previous index
// r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
+ // r6: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
@@ -4067,11 +3006,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(r0, subject);
// Locate the code entry and call it.
- __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
DirectCEntryStub stub;
- stub.GenerateCall(masm, r7);
+ stub.GenerateCall(masm, r6);
- __ LeaveExitFrame(false, no_reg);
+ __ LeaveExitFrame(false, no_reg, true);
+
+ last_match_info_elements = r6;
// r0: result
// subject: subject string (callee saved)
@@ -4161,7 +3102,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
subject,
- r7,
+ r3,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
__ mov(subject, r2);
@@ -4171,7 +3112,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastInputOffset,
subject,
- r7,
+ r3,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
@@ -4343,6 +3284,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : cache cell for call target
Label initialize, done, miss, megamorphic, not_array_function;
@@ -4364,9 +3306,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in r3.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
__ ldr(r5, FieldMemOperand(r3, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &miss);
@@ -4403,6 +3342,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ push(r0);
__ push(r1);
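The GenerateRecordCallTarget hunks above depend on the cache-state protocol the comments describe: a cell starts uninitialized, caches a single JSFunction (or an AllocationSite for the Array function) while monomorphic, and degrades to megamorphic on any mismatch. A minimal host-side sketch of that state machine, with illustrative names:

    enum class CacheState { kUninitialized, kMonomorphic, kMegamorphic };

    struct Cell {
      CacheState state = CacheState::kUninitialized;
      const void* target = nullptr;
    };

    void RecordCallTarget(Cell& cell, const void* callee) {
      switch (cell.state) {
        case CacheState::kUninitialized:
          cell.target = callee;               // first call: go monomorphic
          cell.state = CacheState::kMonomorphic;
          break;
        case CacheState::kMonomorphic:
          if (cell.target != callee)          // mismatch: give up
            cell.state = CacheState::kMegamorphic;
          break;
        case CacheState::kMegamorphic:
          break;                              // terminal state
      }
    }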
@@ -4739,7 +3679,6 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
Register scratch2,
Register scratch3,
Register scratch4,
- Register scratch5,
int flags) {
bool ascii = (flags & COPY_ASCII) != 0;
bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
@@ -4814,30 +3753,29 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
__ bind(&loop);
__ ldr(scratch3, MemOperand(src, 4, PostIndex));
- __ sub(scratch5, limit, Operand(dest));
__ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
__ str(scratch1, MemOperand(dest, 4, PostIndex));
__ mov(scratch1, Operand(scratch3, LSR, right_shift));
// Loop if four or more bytes left to copy.
- // Compare to eight, because we did the subtract before increasing dst.
- __ sub(scratch5, scratch5, Operand(8), SetCC);
+ __ sub(scratch3, limit, Operand(dest));
+ __ sub(scratch3, scratch3, Operand(4), SetCC);
__ b(ge, &loop);
}
// There is now between zero and three bytes left to copy (negative that
- // number is in scratch5), and between one and three bytes already read into
+ // number is in scratch3), and between one and three bytes already read into
// scratch1 (eight times that number in scratch4). We may have read past
// the end of the string, but because objects are aligned, we have not read
// past the end of the object.
// Find the minimum of remaining characters to move and preloaded characters
// and write those as bytes.
- __ add(scratch5, scratch5, Operand(4), SetCC);
+ __ add(scratch3, scratch3, Operand(4), SetCC);
__ b(eq, &done);
- __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
+ __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
// Move minimum of bytes read and bytes left to copy to scratch4.
- __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
- // Between one and three (value in scratch5) characters already read into
+ __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
+ // Between one and three (value in scratch3) characters already read into
// scratch ready to write.
- __ cmp(scratch5, Operand(2));
+ __ cmp(scratch3, Operand(2));
__ strb(scratch1, MemOperand(dest, 1, PostIndex));
__ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
__ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
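Dropping scratch5 works because the remaining byte count can be recomputed from limit and dest on every iteration; the splicing itself is untouched. For reference, a host-side rendering of the inner loop (little-endian, as on ARM):

    #include <cstdint>

    // carry holds the bytes already read from the previous word, playing
    // the role of scratch1 above; left_shift is 8, 16 or 24.
    void SpliceCopy(uint32_t carry, const uint32_t* src, uint32_t* dst,
                    int words, unsigned left_shift) {
      const unsigned right_shift = 32 - left_shift;
      for (int i = 0; i < words; ++i) {
        uint32_t w = src[i];                 // ldr scratch3, ..., PostIndex
        dst[i] = carry | (w << left_shift);  // orr + str
        carry = w >> right_shift;            // mov scratch1, LSR right_shift
      }
    }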
@@ -5177,10 +4115,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_slice);
- __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
+ __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
__ jmp(&set_slice_header);
__ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
+ __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
__ bind(&set_slice_header);
__ mov(r3, Operand(r3, LSL, 1));
__ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
@@ -5221,7 +4159,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ b(eq, &two_byte_sequential);
// Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
+ __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
// Locate first character of substring to copy.
__ add(r5, r5, r3);
@@ -5233,13 +4171,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r2: result string length
// r5: first character of substring to copy
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_r0);
// Allocate and copy the resulting two-byte string.
__ bind(&two_byte_sequential);
- __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
+ __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
// Locate first character of substring to copy.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
@@ -5253,7 +4191,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r5: first character of substring to copy.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
- masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
+ masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);
__ bind(&return_r0);
Counters* counters = masm->isolate()->counters();
@@ -5519,7 +4457,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3,
&call_runtime);
// Get the two characters forming the sub string.
@@ -5530,7 +4468,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// just allocate a new one.
Label make_two_character_string;
StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
+ masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -5575,7 +4513,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Allocate an ASCII cons string.
__ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
+ __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
Label skip_write_barrier, after_writing;
@@ -5586,15 +4524,15 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ cmp(r4, Operand::Zero());
__ b(eq, &skip_write_barrier);
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ RecordWriteField(r7,
+ __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
+ __ RecordWriteField(r3,
ConsString::kFirstOffset,
r0,
r4,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
- __ RecordWriteField(r7,
+ __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
+ __ RecordWriteField(r3,
ConsString::kSecondOffset,
r1,
r4,
@@ -5603,12 +4541,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ jmp(&after_writing);
__ bind(&skip_write_barrier);
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+ __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
+ __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
__ bind(&after_writing);
- __ mov(r0, Operand(r7));
+ __ mov(r0, Operand(r3));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -5628,7 +4566,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(eq, &ascii_data);
// Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
+ __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime);
__ jmp(&allocated);
// We cannot encounter sliced strings or cons strings here since:
@@ -5652,14 +4590,15 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
// Check whether both strings have same encoding
- __ eor(r7, r4, Operand(r5));
- __ tst(r7, Operand(kStringEncodingMask));
+ __ eor(ip, r4, Operand(r5));
+ ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask));
+ __ tst(ip, Operand(kStringEncodingMask));
__ b(ne, &call_runtime);
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r4, Operand(kStringRepresentationMask));
STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r7,
+ __ add(r6,
r0,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
LeaveCC,
@@ -5669,7 +4608,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kShortExternalStringTag != 0);
__ tst(r4, Operand(kShortExternalStringMask));
__ b(ne, &call_runtime);
- __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
+ __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
__ bind(&first_prepared);
STATIC_ASSERT(kSeqStringTag == 0);
@@ -5689,76 +4628,57 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&second_prepared);
Label non_ascii_string_add_flat_result;
- // r7: first character of first string
+ // r6: first character of first string
// r1: first character of second string
// r2: length of first string.
// r3: length of second string.
- // r6: sum of lengths.
// Both strings have the same encoding.
STATIC_ASSERT(kTwoByteStringTag == 0);
__ tst(r5, Operand(kStringEncodingMask));
__ b(eq, &non_ascii_string_add_flat_result);
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r3));
+ __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime);
+ __ sub(r2, r2, Operand(r3));
+ __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string.
- // r7: first character of first string.
+ // r6: first character of first string.
// r1: first character of second string.
// r2: length of first string.
// r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+ // r5: first character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true);
+ // r5: next character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r3));
+ __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime);
+ __ sub(r2, r2, Operand(r3));
+ __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// r0: result string.
- // r7: first character of first string.
+ // r6: first character of first string.
// r1: first character of second string.
// r2: length of first string.
// r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+ // r5: first character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false);
+ // r5: next character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ CallRuntime(Runtime::kStringAdd, 2);
- }
- __ Ret();
- } else {
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
- }
- __ Ret();
- } else {
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -5792,13 +4712,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
__ mov(arg, scratch1);
__ str(arg, MemOperand(sp, stack_offset));
__ bind(&done);
@@ -6401,7 +5315,7 @@ struct AheadOfTimeWriteBarrierStubList {
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
- { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
+ { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
@@ -6428,8 +5342,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// FastNewClosureStub::Generate
{ REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
// StringAddStub::Generate
- { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
- { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index d05e9a1d84..c03d8f27ec 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -106,7 +106,6 @@ class StringHelper : public AllStatic {
Register scratch2,
Register scratch3,
Register scratch4,
- Register scratch5,
int flags);
@@ -257,31 +256,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
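The deleted class survives as the LookupNumberStringCache macro-assembler helper used earlier in this diff; its contract, per the removed comment, is: on a hit, fall through with the result, and on a miss, jump to not_found with the input register untouched. A standalone analogue of that contract:

    #include <string>
    #include <unordered_map>

    // nullptr plays the role of the not_found label; the argument is left
    // unmodified either way.
    const std::string* LookupNumberStringCache(
        const std::unordered_map<double, std::string>& cache, double number) {
      auto it = cache.find(number);
      return it == cache.end() ? nullptr : &it->second;
    }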
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 1bcf3e3a60..44c331b75f 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -55,7 +55,7 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
+ return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
fast_exp_arm_machine_code, x, 0);
}
#endif
@@ -402,8 +402,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
- __ TestJSArrayForAllocationMemento(r2, r4);
- __ b(eq, allocation_memento_found);
+ __ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
}
// Set transitioned map.
@@ -432,8 +431,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map, done;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(r2, r4);
- __ b(eq, fail);
+ __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -444,15 +442,16 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ push(lr);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: source FixedArray
// r5: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
// Use lr as a temporary register.
__ mov(lr, Operand(r5, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
- __ Allocate(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
+ __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
// r6: destination FixedDoubleArray, not tagged as heap object.
+ __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ // r4: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
@@ -483,15 +482,15 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Prepare for conversion loop.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
- __ add(r6, r7, Operand(r5, LSL, 2));
+ __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
+ __ add(r6, r9, Operand(r5, LSL, 2));
__ mov(r4, Operand(kHoleNanLower32));
__ mov(r5, Operand(kHoleNanUpper32));
// r3: begin of source FixedArray element fields, not tagged
// r4: kHoleNanLower32
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
- // r7: begin of FixedDoubleArray element fields, not tagged
+ // r9: begin of FixedDoubleArray element fields, not tagged
__ b(&entry);
@@ -514,30 +513,30 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Convert and copy elements.
__ bind(&loop);
- __ ldr(r9, MemOperand(r3, 4, PostIndex));
- // r9: current element
- __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
+ __ ldr(lr, MemOperand(r3, 4, PostIndex));
+ // lr: current element
+ __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
// Normal smi, convert to double and store.
- __ vmov(s0, r9);
+ __ vmov(s0, lr);
__ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r7, 0);
- __ add(r7, r7, Operand(8));
+ __ vstr(d0, r9, 0);
+ __ add(r9, r9, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
- __ SmiTag(r9);
- __ orr(r9, r9, Operand(1));
- __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+ __ SmiTag(lr);
+ __ orr(lr, lr, Operand(1));
+ __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
- __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
+ __ Strd(r4, r5, MemOperand(r9, 8, PostIndex));
__ bind(&entry);
- __ cmp(r7, r6);
+ __ cmp(r9, r6);
__ b(lt, &loop);
__ pop(lr);
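The loop above marks holes by storing the kHoleNanLower32/kHoleNanUpper32 pair: the hole is a NaN whose bit pattern ordinary arithmetic never produces, written as two 32-bit halves with Strd. A sketch of the encoding (the concrete payload values are not shown in this hunk):

    #include <cstdint>
    #include <cstring>

    double MakeHoleNaN(uint32_t lower, uint32_t upper) {
      uint64_t bits = (static_cast<uint64_t>(upper) << 32) | lower;
      double hole;
      std::memcpy(&hole, &bits, sizeof hole);  // well-defined type pun
      return hole;  // a NaN, assuming `upper` carries an all-ones exponent
    }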
@@ -558,8 +557,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label entry, loop, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(r2, r4);
- __ b(eq, fail);
+ __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -577,7 +575,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Allocate new FixedArray.
__ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
__ add(r0, r0, Operand(r5, LSL, 1));
- __ Allocate(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+ __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
@@ -589,14 +587,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ add(r3, r6, Operand(FixedArray::kHeaderSize));
__ add(r6, r6, Operand(kHeapObjectTag));
__ add(r5, r3, Operand(r5, LSL, 1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
__ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
// Using offset addresses in r4 to fully take advantage of post-indexing.
// r3: begin of destination FixedArray element fields, not tagged
// r4: begin of source FixedDoubleArray element fields, not tagged, +4
// r5: end of destination FixedArray, not tagged
// r6: destination FixedArray
- // r7: the-hole pointer
// r9: heap number map
__ b(&entry);
@@ -608,7 +604,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ bind(&loop);
__ ldr(r1, MemOperand(r4, 8, PostIndex));
- // lr: current element's upper 32 bit
+ // r1: current element's upper 32 bit
// r4: address of next element's upper 32 bit
__ cmp(r1, Operand(kHoleNanUpper32));
__ b(eq, &convert_hole);
@@ -631,7 +627,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ str(r7, MemOperand(r3, 4, PostIndex));
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ str(r0, MemOperand(r3, 4, PostIndex));
__ bind(&entry);
__ cmp(r3, r5);
@@ -775,50 +772,65 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
ASSERT(!temp2.is(temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
- Label done;
+ Label zero, infinity, done;
__ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
__ vldr(double_scratch1, ExpConstant(0, temp3));
- __ vmov(result, kDoubleRegZero);
__ VFPCompareAndSetFlags(double_scratch1, input);
- __ b(ge, &done);
+ __ b(ge, &zero);
+
__ vldr(double_scratch2, ExpConstant(1, temp3));
__ VFPCompareAndSetFlags(input, double_scratch2);
- __ vldr(result, ExpConstant(2, temp3));
- __ b(ge, &done);
+ __ b(ge, &infinity);
+
__ vldr(double_scratch1, ExpConstant(3, temp3));
__ vldr(result, ExpConstant(4, temp3));
__ vmul(double_scratch1, double_scratch1, input);
__ vadd(double_scratch1, double_scratch1, result);
- __ vmov(temp2, temp1, double_scratch1);
+ __ VmovLow(temp2, double_scratch1);
__ vsub(double_scratch1, double_scratch1, result);
__ vldr(result, ExpConstant(6, temp3));
__ vldr(double_scratch2, ExpConstant(5, temp3));
__ vmul(double_scratch1, double_scratch1, double_scratch2);
__ vsub(double_scratch1, double_scratch1, input);
__ vsub(result, result, double_scratch1);
- __ vmul(input, double_scratch1, double_scratch1);
- __ vmul(result, result, input);
- __ mov(temp1, Operand(temp2, LSR, 11));
+ __ vmul(double_scratch2, double_scratch1, double_scratch1);
+ __ vmul(result, result, double_scratch2);
__ vldr(double_scratch2, ExpConstant(7, temp3));
__ vmul(result, result, double_scratch2);
__ vsub(result, result, double_scratch1);
- __ vldr(double_scratch2, ExpConstant(8, temp3));
+ // Move 1 into double_scratch2, as math_exp_constants_array[8] == 1.
+ ASSERT(*reinterpret_cast<double*>
+ (ExternalReference::math_exp_constants(8).address()) == 1);
+ __ vmov(double_scratch2, 1);
__ vadd(result, result, double_scratch2);
- __ movw(ip, 0x7ff);
- __ and_(temp2, temp2, Operand(ip));
+ __ mov(temp1, Operand(temp2, LSR, 11));
+ __ Ubfx(temp2, temp2, 0, 11);
__ add(temp1, temp1, Operand(0x3ff));
- __ mov(temp1, Operand(temp1, LSL, 20));
// Must not call ExpConstant() after overwriting temp3!
__ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
- __ add(temp3, temp3, Operand(kPointerSize));
- __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
- __ orr(temp1, temp1, temp2);
- __ vmov(input, ip, temp1);
- __ vmul(result, result, input);
+ __ add(temp3, temp3, Operand(temp2, LSL, 3));
+ __ ldm(ia, temp3, temp2.bit() | temp3.bit());
+ // The first word loaded goes into the lower-numbered register.
+ if (temp2.code() < temp3.code()) {
+ __ orr(temp1, temp3, Operand(temp1, LSL, 20));
+ __ vmov(double_scratch1, temp2, temp1);
+ } else {
+ __ orr(temp1, temp2, Operand(temp1, LSL, 20));
+ __ vmov(double_scratch1, temp3, temp1);
+ }
+ __ vmul(result, result, double_scratch1);
+ __ b(&done);
+
+ __ bind(&zero);
+ __ vmov(result, kDoubleRegZero);
+ __ b(&done);
+
+ __ bind(&infinity);
+ __ vldr(result, ExpConstant(2, temp3));
+
__ bind(&done);
}
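One subtlety in the rewritten tail: ldm always fills the lower-numbered register from the lower address, regardless of operand order, which is exactly why the generator compares temp2.code() against temp3.code() before reassembling the 64-bit table entry. The host-side equivalent of that reassembly:

    #include <cstdint>

    // low_word came from the lower address (the lower-numbered register);
    // the orr above has already merged the biased exponent into high_word.
    uint64_t CombineTableEntry(uint32_t low_word, uint32_t high_word) {
      return (static_cast<uint64_t>(high_word) << 32) | low_word;
    }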
@@ -859,7 +871,7 @@ bool Code::IsYoungSequence(byte* sequence) {
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
- *age = kNoAge;
+ *age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
Address target_address = Memory::Address_at(
@@ -870,16 +882,17 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
+ if (age == kNoAgeCodeAge) {
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 54530d8726..ecbe64cbad 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -97,6 +97,7 @@ class StringCharLoadGenerator : public AllStatic {
class MathExpGenerator : public AllStatic {
public:
+ // Register input isn't modified. All other registers are clobbered.
static void EmitMathExp(MacroAssembler* masm,
DwVfpRegister input,
DwVfpRegister result,
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 3c57b64395..9339c5fade 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -81,100 +81,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
-
-// The back edge bookkeeping code matches the pattern:
-//
-// <decrement profiling counter>
-// 2a 00 00 01 bpl ok
-// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
-// e1 2f ff 3c blx ip
-// ok-label
-//
-// We patch the code to the following form:
-//
-// <decrement profiling counter>
-// e1 a0 00 00 mov r0, r0 (NOP)
-// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
-// e1 2f ff 3c blx ip
-// ok-label
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Turn the jump into nops.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->nop();
- // Replace the call address.
- uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address interrupt_address_pointer = pc_after + interrupt_address_offset;
- Memory::uint32_at(interrupt_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Restore the original jump.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(4 * kInstrSize, pl); // ok-label is 4 instructions later.
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- // Restore the original call address.
- uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address interrupt_address_pointer = pc_after + interrupt_address_offset;
- Memory::uint32_at(interrupt_address_pointer) =
- reinterpret_cast<uint32_t>(interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- static const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
-
- uint32_t interrupt_address_offset =
- Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
- Address interrupt_address_pointer = pc_after + interrupt_address_offset;
-
- if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
- Memory::uint32_at(interrupt_address_pointer));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
- Memory::uint32_at(interrupt_address_pointer));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -201,10 +107,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
+ int params = descriptor->environment_length();
output_frame->SetRegister(r0.code(), params);
output_frame->SetRegister(r1.code(), handler);
}
@@ -362,8 +265,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
- __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
- __ push(r7);
+ __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
+ __ push(r6);
__ bind(&inner_loop_header);
__ cmp(r3, Operand::Zero());
__ b(ne, &inner_push_loop); // test for gt?
@@ -409,9 +312,9 @@ void Deoptimizer::EntryGenerator::Generate() {
__ InitializeRootRegister();
__ pop(ip); // remove pc
- __ pop(r7); // get continuation, leave pc on stack
+ __ pop(ip); // get continuation, leave pc on stack
__ pop(lr);
- __ Jump(r7);
+ __ Jump(ip);
__ stop("Unreachable.");
}
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index d022b414b4..64a718e89f 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -64,7 +64,7 @@ const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
- 1 << 7 | // r7 v4
+ 1 << 7 | // r7 v4 (pp in JavaScript code)
1 << 8 | // r8 v5 (cp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
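This one-word annotation is the thread running through most of this diff: r7, like r8 (cp), is now reserved while executing JavaScript code, which is why stub after stub above migrates its scratch registers to r3/r4/r5/r6/r9 or ip. The RegList itself is an ordinary bitmask; for instance:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kCalleeSaved =
          1u << 4 | 1u << 5 | 1u << 6 | 1u << 7 |   // r4-r7
          1u << 8 | 1u << 9 | 1u << 10 | 1u << 11;  // r8-r11 (r9 if available)
      std::printf("kCalleeSaved = 0x%x\n", kCalleeSaved);  // prints 0xff0
    }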
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index b6fb70b5df..c57c785598 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -148,13 +148,10 @@ void FullCodeGenerator::Generate() {
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
- Label ok;
__ cmp(r5, Operand::Zero());
- __ b(eq, &ok);
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ __ str(r2, MemOperand(sp, receiver_offset), ne);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -163,16 +160,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- {
- PredictableCodeSizeScope predictible_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ nop(ip.code());
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
+ __ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -1167,7 +1155,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(r1, cell);
+ __ Move(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
@@ -1651,13 +1639,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -3592,8 +3578,8 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into r0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3964,9 +3950,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
+ Label bailout, done, one_char_separator, long_separator, non_trivial_array,
+ not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
@@ -3984,19 +3969,18 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Register string = r4;
Register element = r5;
Register elements_end = r6;
- Register scratch1 = r7;
- Register scratch2 = r9;
+ Register scratch = r9;
// Separator operand is on the stack.
__ pop(separator);
// Check that the array is a JSArray.
__ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+ __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
__ b(ne, &bailout);
// Check that the array has fast elements.
- __ CheckFastElements(scratch1, scratch2, &bailout);
+ __ CheckFastElements(scratch, array_length, &bailout);
// If the array has length zero, return the empty string.
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
@@ -4033,11 +4017,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&loop);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch1), SetCC);
+ __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
+ __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
__ b(lt, &loop);
@@ -4058,23 +4042,23 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Check that the separator is a flat ASCII string.
__ JumpIfSmi(separator, &bailout);
- __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// a smi but the other values are, so the result is a smi.
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ sub(string_length, string_length, Operand(scratch1));
- __ smull(scratch2, ip, array_length, scratch1);
+ __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ sub(string_length, string_length, Operand(scratch));
+ __ smull(scratch, ip, array_length, scratch);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
__ cmp(ip, Operand::Zero());
__ b(ne, &bailout);
- __ tst(scratch2, Operand(0x80000000));
+ __ tst(scratch, Operand(0x80000000));
__ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch2), SetCC);
+ __ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ SmiUntag(string_length);
@@ -4091,9 +4075,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// array_length: Length of the array.
__ AllocateAsciiString(result,
string_length,
- scratch1,
- scratch2,
- elements_end,
+ scratch,
+ string, // used as scratch
+ elements_end, // used as scratch
&bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
@@ -4106,8 +4090,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(1)));
+ __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ cmp(scratch, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);
@@ -4125,7 +4109,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4157,7 +4141,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4178,7 +4162,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
separator,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
@@ -4187,7 +4171,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4894,6 +4878,91 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ Address branch_address = pc - 3 * kInstrSize;
+ CodePatcher patcher(branch_address, 1);
+
+ switch (target_state) {
+ case INTERRUPT:
+ // <decrement profiling counter>
+ // 2a 00 00 01 bpl ok
+ // e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
+ // e1 2f ff 3c blx ip
+ // ok-label
+ patcher.masm()->b(4 * kInstrSize, pl); // Jump offset is 4 instructions.
+ ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(branch_address));
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // <decrement profiling counter>
+ // e1 a0 00 00 mov r0, r0 (NOP)
+ // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
+ // e1 2f ff 3c blx ip
+ // ok-label
+ patcher.masm()->nop();
+ break;
+ }
+
+ Address pc_immediate_load_address = pc - 2 * kInstrSize;
+ // Replace the call address.
+ uint32_t interrupt_address_offset =
+ Memory::uint16_at(pc_immediate_load_address) & 0xfff;
+ Address interrupt_address_pointer = pc + interrupt_address_offset;
+ Memory::uint32_at(interrupt_address_pointer) =
+ reinterpret_cast<uint32_t>(replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_immediate_load_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp);
+
+ Address branch_address = pc - 3 * kInstrSize;
+ Address pc_immediate_load_address = pc - 2 * kInstrSize;
+ uint32_t interrupt_address_offset =
+ Memory::uint16_at(pc_immediate_load_address) & 0xfff;
+ Address interrupt_address_pointer = pc + interrupt_address_offset;
+
+ if (Memory::int32_at(branch_address) == kBranchBeforeInterrupt) {
+ ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->InterruptCheck()->entry()));
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_immediate_load_address)));
+ return INTERRUPT;
+ }
+
+ ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address)));
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_immediate_load_address)));
+
+ if (Memory::uint32_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry()));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
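Both functions locate the patch site the same way: the back-edge sequence always ends with a pc-relative ldr into ip followed by blx ip, so the literal-pool slot holding the call target is found by masking the low 12 bits of the ldr encoding (ARM immediate offsets are 12 bits wide). In isolation:

    #include <cstdint>

    // pc points past the sequence; the masked value is the imm12 field of
    // the ldr two instructions back, as in the & 0xfff above.
    uint32_t* LiteralPoolSlot(uint8_t* pc, uint32_t ldr_instruction) {
      uint32_t offset = ldr_instruction & 0xfff;
      return reinterpret_cast<uint32_t*>(pc + offset);
    }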
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index f15d4b11f8..aded4c1dd8 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -656,7 +656,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -1394,7 +1394,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Register receiver = r2;
Register receiver_map = r3;
Register elements_map = r6;
- Register elements = r7; // Elements array of the receiver.
+ Register elements = r9; // Elements array of the receiver.
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@@ -1487,7 +1487,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, strict_mode,
+ Code::HANDLER, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 59a8818ac6..86d5d2b329 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -412,18 +412,19 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip an extra slot if this is a double-width slot.
- if (is_double) spill_slot_count_++;
+ if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
return spill_slot_count_++;
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
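The boolean parameter becomes a RegisterKind, but the accounting is unchanged: a double burns two consecutive word-sized slots and the index of the second is returned, so a double value always owns a full two-slot pair. A worked example:

    // Mirrors GetNextSpillIndex above.
    struct Chunk {
      int spill_slot_count = 0;
      int Next(bool is_double) {
        if (is_double) spill_slot_count++;  // reserve the partner slot
        return spill_slot_count++;
      }
    };
    // Requests of double, word, double yield indices 1, 2, 4
    // (occupying slots 0-1, 2, and 3-4 respectively).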
@@ -439,7 +440,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(false);
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
@@ -655,7 +656,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -710,51 +711,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseRegisterAtStart(right_value);
}
- } else {
- right = UseRegisterAtStart(right_value);
- }
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
}
- }
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
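
The restructured DoShift keeps JavaScript's shift semantics: counts are taken mod 32 (the `& 0x1f` mask), and a logical shift right by zero is only safe when every use truncates back to int32. A standalone sketch of both points, plain C++ with no V8 dependencies, included here only as illustration:

#include <cstdint>
#include <cstdio>

int main() {
  // JS takes shift counts mod 32, hence `constant->Integer32Value() & 0x1f`.
  int32_t x = 1;
  printf("%d\n", x << (33 & 0x1f));  // prints 2, same as x << 1

  // A logical shift right by 0 keeps all 32 bits, so a negative int32 becomes
  // a uint32 above INT32_MAX. That result is only usable if every use
  // truncates to int32, which is the deopt condition tested in DoShift.
  uint32_t shr0 = static_cast<uint32_t>(-1) >> 0;
  printf("%u\n", shr0);  // 4294967295
  return 0;
}
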
@@ -763,29 +757,34 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right = UseFixedDouble(instr->right(), d2);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ // We call a C function for double modulo. It can't trigger a GC. We need
+ // to use a fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ return MarkAsCall(DefineFixedDouble(result, d1), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
}
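
The C function the comment above refers to behaves like the standard fmod; a minimal standalone illustration of the double-modulo semantics (an analogue, not the actual runtime entry V8 calls):

#include <cmath>
#include <cstdio>

int main() {
  printf("%g\n", std::fmod(5.5, 2.0));   // 1.5
  printf("%g\n", std::fmod(-5.5, 2.0));  // -1.5: the sign follows the dividend
  return 0;
}
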
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -861,9 +860,31 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ HValue* first_operand = current->OperandCount() == 0
+ ? graph()->GetConstant1()
+ : current->OperandAt(0);
+ instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(first_operand)));
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
#if DEBUG
// Make sure that the lithium instruction has either no fixed register
// constraints in temps or the result OR no uses that are only used at
@@ -893,14 +914,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
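
The new `argument_count_ += current->argument_delta()` line centralizes bookkeeping that the handlers below used to do by hand (the ++ in DoPushArgument, the -= in each call builder). A minimal standalone sketch of the idea; `Instr` is an illustrative stand-in, not a V8 type:

#include <cassert>
#include <cstdio>

struct Instr {
  int pushes;  // arguments the instruction pushes
  int pops;    // arguments it consumes, e.g. a call's argument_count
  int argument_delta() const { return pushes - pops; }
};

int main() {
  int argument_count = 0;
  Instr push = {1, 0};
  Instr call = {0, 3};
  for (int i = 0; i < 3; ++i) argument_count += push.argument_delta();
  argument_count += call.argument_delta();  // the call pops all three
  assert(argument_count >= 0);  // mirrors ASSERT(argument_count_ >= 0)
  printf("%d\n", argument_count);  // 0
  return 0;
}
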
@@ -992,19 +1011,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ HValue* value = instr->value();
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
// deoptimization environment. If the instruction is generic no
@@ -1047,9 +1062,10 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LInstanceOf* result =
- new(zone()) LInstanceOf(UseFixed(instr->left(), r0),
- UseFixed(instr->right(), r1));
+ new(zone()) LInstanceOf(context, UseFixed(instr->left(), r0),
+ UseFixed(instr->right(), r1));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1057,18 +1073,14 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0),
- FixedTemp(r4));
+ new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), r0),
+ FixedTemp(r4));
return MarkAsCall(DefineFixed(result, r0), instr);
}
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -1091,7 +1103,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = Use(instr->argument());
return new(zone()) LPushArgument(argument);
}
@@ -1122,14 +1133,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- // If there is a non-return use, the context must be allocated in a register.
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- return DefineAsRegister(new(zone()) LContext);
- }
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
}
- return NULL;
+ return DefineAsRegister(new(zone()) LContext);
}
@@ -1140,7 +1150,8 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
@@ -1158,15 +1169,14 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1206,8 +1216,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ Representation r = instr->value()->representation();
+ LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+ ? NULL
+ : UseFixed(instr->context(), cp);
LOperand* input = UseRegister(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(input);
+ LMathAbs* result = new(zone()) LMathAbs(context, input);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
@@ -1243,7 +1257,7 @@ LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
+ LOperand* input = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
@@ -1269,57 +1283,57 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* key = UseFixed(instr->key(), r2);
- return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LCallKeyed(context, key), r0), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), r0), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), r0), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0),
- instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LCallFunction(context, function), r0), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr);
}
@@ -1347,41 +1361,34 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
+ return AssignEnvironment(DefineAsRegister(div));
}
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineAsRegister(div));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1502,17 +1509,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, d1),
- UseFixedDouble(right, d2));
- return MarkAsCall(DefineFixedDouble(mod, d1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
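
DoMod now dispatches on representation in the same order as the other binary ops. One reason the integer path still carries deopt checks is that JavaScript's % must preserve the sign of a zero result; a standalone illustration of that case (C++ prints a plain 0 where JS needs -0):

#include <cstdio>

int main() {
  // In JS, -5 % 5 is -0: zero with the dividend's sign. An int32 register
  // cannot represent -0, so the optimized integer path deoptimizes for a
  // negative dividend with a zero result (and for division by zero).
  printf("%d\n", -5 % 5);  // 0 in C++
  return 0;
}
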
@@ -1679,7 +1679,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1737,9 +1736,10 @@ LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
- LCmpT* result = new(zone()) LCmpT(left, right);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1766,6 +1766,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1774,8 +1776,8 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1813,10 +1815,11 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
HStringCompareAndBranch* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
+ new(zone()) LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
}
@@ -1883,11 +1886,9 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseTempRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
}
@@ -1905,9 +1906,17 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), r0);
- return MarkAsCall(new(zone()) LThrow(value), instr);
+ return MarkAsCall(new(zone()) LThrow(context, value), instr);
}
@@ -1936,7 +1945,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
if (from.IsTagged()) {
if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -2006,8 +2014,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ LInstruction* result = val->CheckFlag(HInstruction::kUint32)
+ ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
+ : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
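
A uint32 value needs the separate LUint32ToSmi path because, on 32-bit targets, a smi holds only 31 signed bits, and a uint32 with its high bits set falls outside that range. An illustrative tagging helper with assumed semantics, not V8 code:

#include <cstdint>
#include <cstdio>

// A 32-bit smi is the integer shifted left by one with a zero tag bit, so
// the value must fit in 31 signed bits. Large uint32 values fail the check,
// which is the case LUint32ToSmi exists to catch.
bool TagAsSmi(int64_t value, int32_t* smi) {
  if (value < -(1LL << 30) || value >= (1LL << 30)) return false;
  *smi = static_cast<int32_t>(value * 2);  // value << 1 with a zero tag bit
  return true;
}

int main() {
  int32_t smi = 0;
  printf("%d\n", TagAsSmi(5, &smi));             // 1: fits, smi == 10
  printf("%d\n", TagAsSmi(3000000000LL, &smi));  // 0: large uint32, no smi
  return 0;
}
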
@@ -2040,12 +2049,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone())
- LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -2093,8 +2096,11 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), r0),
+ return new(zone()) LReturn(UseFixed(instr->value(), r0), context,
parameter_count);
}
@@ -2127,8 +2133,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), r0);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -2144,10 +2152,11 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), r1);
LOperand* value = UseFixed(instr->value(), r0);
LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(global_object, value);
+ new(zone()) LStoreGlobalGeneric(context, global_object, value);
return MarkAsCall(result, instr);
}
@@ -2182,8 +2191,10 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), r0);
- LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object), r0);
return MarkAsCall(result, instr);
}
@@ -2195,6 +2206,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2211,7 +2227,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (!instr->is_external()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
- obj = UseTempRegister(instr->elements());
+ obj = UseRegister(instr->elements());
} else {
ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
@@ -2239,18 +2255,17 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), r1);
LOperand* key = UseFixed(instr->key(), r0);
LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0);
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), r0);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2260,15 +2275,19 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
+ val = UseRegister(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(instr->value()->representation().IsSmiOrTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
}
return new(zone()) LStoreKeyed(object, key, val);
@@ -2276,17 +2295,13 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
+ (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
+ (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->elements()->representation().IsExternal());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
return new(zone()) LStoreKeyed(external_pointer, key, val);
@@ -2294,6 +2309,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), r2);
LOperand* key = UseFixed(instr->key(), r1);
LOperand* val = UseFixed(instr->value(), r0);
@@ -2302,7 +2318,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
- return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
}
@@ -2312,11 +2329,12 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg);
+ new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
return result;
} else {
+ LOperand* context = UseFixed(instr->context(), cp);
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL);
+ new(zone()) LTransitionElementsKind(object, context, NULL);
return AssignPointerMap(result);
}
}
@@ -2375,56 +2393,68 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), r1);
LOperand* val = UseFixed(instr->value(), r0);
- LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0),
- instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LStringAdd(context, left, right), r0),
+ instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), r0), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), r0), instr);
}
@@ -2471,8 +2501,8 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), r0), instr);
}
@@ -2517,7 +2547,8 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0));
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), r0));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -2556,10 +2587,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
@@ -2592,7 +2626,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
@@ -2604,8 +2638,9 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->enumerable(), r0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 98cacacae1..ed07229e17 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -105,7 +105,6 @@ class LCodeGen;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
@@ -113,13 +112,13 @@ class LCodeGen;
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
- V(IsNumberAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -185,6 +184,7 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
V(WrapReceiver)
@@ -216,7 +216,6 @@ class LInstruction : public ZoneObject {
: environment_(NULL),
hydrogen_value_(NULL),
bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() {}
@@ -257,15 +256,6 @@ class LInstruction : public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- // The 31 bits PositionBits is used to store the int position value. And the
- // position value may be RelocInfo::kNoPosition (-1). The accessor always
- // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
- // and can fit into the 31 bits PositionBits.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -277,7 +267,7 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return IsCall(); }
@@ -305,7 +295,6 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
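
IsCallBits (and the removed PositionBits) use V8's BitField template to pack small fields into one 32-bit word, as seen in the constructor's encode call and the removed position accessors. A minimal standalone re-implementation of the pattern, for illustration only:

#include <cstdint>
#include <cstdio>

template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
  static uint32_t update(uint32_t field, T value) {
    return (field & ~kMask) | encode(value);
  }
};

int main() {
  typedef BitField<bool, 0, 1> IsCallBits;
  uint32_t bit_field = IsCallBits::encode(false);
  bit_field = IsCallBits::update(bit_field, true);
  printf("%d\n", IsCallBits::decode(bit_field));  // 1
  return 0;
}
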
@@ -404,17 +393,17 @@ class LInstructionGap V8_FINAL : public LGap {
class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual bool IsControl() const V8_OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
private:
- int block_id_;
+ HBasicBlock* block_;
};
@@ -483,8 +472,14 @@ class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -785,12 +780,14 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LMathAbs(LOperand* value) {
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
inputs_[0] = value;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
@@ -939,19 +936,6 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
- public:
- explicit LIsNumberAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
-};
-
-
class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1002,15 +986,17 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
- LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
@@ -1086,15 +1072,17 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -1103,28 +1091,32 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
temps_[0] = temp;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -1145,19 +1137,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1318,7 +1297,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1373,8 +1352,8 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
@@ -1405,13 +1384,15 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ LThrow(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1507,16 +1488,21 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
: op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
virtual Opcode opcode() const V8_OVERRIDE {
@@ -1530,11 +1516,12 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value, LOperand* parameter_count) {
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
- inputs_[1] = parameter_count;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
@@ -1546,7 +1533,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
ASSERT(has_constant_parameter_count());
return LConstantOperand::cast(parameter_count());
}
- LOperand* parameter_count() { return inputs_[1]; }
+ LOperand* parameter_count() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@@ -1565,13 +1552,15 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1593,6 +1582,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1631,15 +1629,17 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyedGeneric(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
};
@@ -1652,13 +1652,15 @@ class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1683,16 +1685,19 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
+ LStoreGlobalGeneric(LOperand* context,
+ LOperand* global_object,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ inputs_[2] = value;
}
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
@@ -1822,8 +1827,14 @@ class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
@@ -1865,13 +1876,15 @@ class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
@@ -1882,13 +1895,15 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
+ LCallKeyed(LOperand* context, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = key;
}
- LOperand* key() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
@@ -1900,8 +1915,14 @@ class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallNamed(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
@@ -1912,13 +1933,15 @@ class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1927,8 +1950,14 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallGlobal(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
@@ -1950,13 +1979,15 @@ class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1967,13 +1998,15 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNewArray(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
@@ -1984,13 +2017,24 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
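
The new ClobbersDoubleRegisters override reports a clobber only when the runtime call does not save FP registers, so the allocator may keep double values live across kSaveFPRegs calls. A stripped-down standalone analogue of the hook; the types here are illustrative mirrors, not the real V8 declarations:

#include <cstdio>

enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

struct LInstr {
  virtual ~LInstr() {}
  // Default: any call is assumed to clobber the double registers.
  virtual bool ClobbersDoubleRegisters() const { return true; }
};

struct CallRuntime : LInstr {
  explicit CallRuntime(SaveFPRegsMode mode) : mode_(mode) {}
  virtual bool ClobbersDoubleRegisters() const {
    return mode_ == kDontSaveFPRegs;  // saved FP registers survive the call
  }
  SaveFPRegsMode mode_;
};

int main() {
  CallRuntime saving(kSaveFPRegs);
  CallRuntime plain(kDontSaveFPRegs);
  printf("%d %d\n", saving.ClobbersDoubleRegisters(),
         plain.ClobbersDoubleRegisters());  // 0 1
  return 0;
}
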
@@ -2031,6 +2075,19 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -2119,7 +2176,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2191,15 +2248,17 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2242,17 +2301,22 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = value;
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2263,14 +2327,17 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
+ LOperand* context,
LOperand* new_map_temp) {
inputs_[0] = object;
+ inputs_[1] = context;
temps_[0] = new_map_temp;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* object() { return inputs_[0]; }
LOperand* new_map_temp() { return temps_[0]; }
@@ -2280,8 +2347,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2303,15 +2372,17 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
@@ -2319,28 +2390,32 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
}
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
+ explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
}
- LOperand* char_code() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
@@ -2451,12 +2526,17 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
inputs_[1] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
+ LOperand* context() { return inputs_[0]; }
LOperand* size() { return inputs_[1]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
@@ -2466,15 +2546,27 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
@@ -2493,13 +2585,15 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2546,8 +2640,14 @@ class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2558,13 +2658,15 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
@@ -2620,8 +2722,8 @@ class LPlatformChunk V8_FINAL : public LChunk {
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
};
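GetNextSpillIndex and GetNextSpillSlot now take a RegisterKind enum instead of a bool, which keeps call sites readable and leaves room for further register classes. A rough sketch of the idea, assuming a simple two-counter allocator (the names here are illustrative, not the real V8 API):

    enum RegisterKindSketch { GENERAL_REGISTERS, DOUBLE_REGISTERS };

    struct SpillAllocatorSketch {
      int next_general_ = 0;
      int next_double_ = 0;
      int GetNextSpillIndex(RegisterKindSketch kind) {
        // The real code also accounts for doubles needing wider slots; the
        // separate counters here just show the shape of the interface.
        return kind == DOUBLE_REGISTERS ? next_double_++ : next_general_++;
      }
    };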
@@ -2645,6 +2747,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2778,7 +2882,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 7f65023ed0..fbe8e171fa 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -98,24 +98,6 @@ void LCodeGen::Abort(BailoutReason reason) {
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
@@ -139,33 +121,16 @@ bool LCodeGen::GeneratePrologue() {
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
__ cmp(r5, Operand::Zero());
- __ b(eq, &ok);
int receiver_offset = scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ __ str(r2, MemOperand(sp, receiver_offset), ne);
}
}
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- __ Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- } else {
- PredictableCodeSizeScope predictible_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ nop(ip.code());
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
}
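Two prologue simplifications land here. The receiver patch drops a label and a branch by predicating the store itself: on ARM, str can execute conditionally, so the undefined value is written only when the ne condition from the preceding cmp holds. Separately, the hand-rolled stub/function frame setup collapses into a single Prologue() macro call. A pseudo-C rendering of the predicated-store rewrite, assuming r5 is the method/function marker described in the comment above:

    // Before: cmp r5, #0 ; b eq, &ok ; str r2, [sp, #off] ; bind &ok
    // After:  cmp r5, #0 ; str r2, [sp, #off], ne   (store only if r5 != 0)
    void PatchReceiverSketch(int r5, void* undefined, void** sp_slot) {
      if (r5 != 0) *sp_slot = undefined;  // one conditional store, no branch
    }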
@@ -248,6 +213,8 @@ bool LCodeGen::GeneratePrologue() {
// Trace the call.
if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
@@ -269,45 +236,15 @@ void LCodeGen::GenerateOsrPrologue() {
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
-
- // Don't emit code for basic blocks with a replacement.
- if (instr->IsLabel()) {
- emit_instructions = !LLabel::cast(instr)->HasReplacement();
- }
- if (!emit_instructions) continue;
-
- if (FLAG_code_comments && instr->HasInterestingComment(this)) {
- Comment(";;; <@%d,#%d> %s",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->Mnemonic());
- }
-
- RecordAndUpdatePosition(instr->position());
-
- instr->CompileToNative(this);
- }
- EnsureSpaceForLazyDeopt();
- last_lazy_deopt_pc_ = masm()->pc_offset();
- return !is_aborted();
-}
-
-
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -448,7 +385,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
ASSERT(r.IsSmiOrTagged());
- __ LoadObject(scratch, literal);
+ __ Move(scratch, literal);
}
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
@@ -727,13 +664,11 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode,
TargetAddressStorageMode storage_mode) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr != NULL);
// Block literal pool emission to ensure the nop indicating no inlined smi
// code is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
@@ -748,20 +683,36 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
- RecordPosition(pointers->position());
- __ CallRuntime(function, num_arguments);
+ __ CallRuntime(function, num_arguments, save_doubles);
+
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Move(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ ldr(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
@@ -862,26 +813,31 @@ void LCodeGen::DeoptimizeIf(Condition condition,
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the dependent code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
@@ -977,10 +933,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
}
@@ -991,7 +943,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -1013,17 +965,10 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles(
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
+ masm()->positions_recorder()->WriteRecordedPositions();
}
@@ -1073,6 +1018,7 @@ void LCodeGen::DoParameter(LParameter* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
@@ -1090,11 +1036,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1383,7 +1324,8 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
void LCodeGen::DoDivI(LDivI* instr) {
if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
+ const Register dividend = ToRegister(instr->left());
+ const Register result = ToRegister(instr->result());
int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
int32_t test_value = 0;
int32_t power = 0;
@@ -1394,7 +1336,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
} else {
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ tst(dividend, Operand(dividend));
+ __ cmp(dividend, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for (kMinInt / -1).
@@ -1409,20 +1351,26 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (test_value != 0) {
if (instr->hydrogen()->CheckFlag(
HInstruction::kAllUsesTruncatingToInt32)) {
- __ cmp(dividend, Operand(0));
- __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
- __ mov(dividend, Operand(dividend, ASR, power));
- if (divisor > 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
- if (divisor < 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, gt);
+ __ sub(result, dividend, Operand::Zero(), SetCC);
+ __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+ __ mov(result, Operand(result, ASR, power));
+ if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+ if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
return; // Don't fall through to "__ rsb" below.
} else {
// Deoptimize if remainder is not 0.
__ tst(dividend, Operand(test_value));
DeoptimizeIf(ne, instr->environment());
- __ mov(dividend, Operand(dividend, ASR, power));
+ __ mov(result, Operand(dividend, ASR, power));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
+ }
+ } else {
+ if (divisor < 0) {
+ __ rsb(result, dividend, Operand(0));
+ } else {
+ __ Move(result, dividend);
}
}
- if (divisor < 0) __ rsb(dividend, dividend, Operand(0));
return;
}
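For a power-of-two divisor the quotient now lands in a dedicated result register instead of clobbering the dividend. The truncating fast path negates a negative dividend, shifts, and re-negates when the signs of dividend and divisor differ. A runnable sketch of that arithmetic, assuming divisor == +/-2^power and that all uses truncate to int32 (so kMinInt overflow is tolerated, as in the generated code):

    #include <stdint.h>

    int32_t DivByPowerOfTwoTruncating(int32_t dividend, int power,
                                      bool divisor_is_negative) {
      int32_t magnitude = dividend < 0 ? -dividend : dividend;  // rsb ..., lt
      int32_t quotient = magnitude >> power;                    // asr #power
      bool negate = (dividend < 0) != divisor_is_negative;      // rsb ..., lt/gt
      return negate ? -quotient : quotient;
    }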
@@ -1439,12 +1387,15 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
+ Label positive;
+ if (!instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+        // Do the test only if it hasn't been done above.
+ __ cmp(right, Operand::Zero());
+ }
+ __ b(pl, &positive);
__ cmp(left, Operand::Zero());
- __ b(ne, &left_not_zero);
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
- __ bind(&left_not_zero);
+ DeoptimizeIf(eq, instr->environment());
+ __ bind(&positive);
}
// Check for (kMinInt / -1).
@@ -1886,7 +1837,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
- __ LoadObject(ToRegister(instr->result()), value);
+ __ Move(ToRegister(instr->result()), value);
}
@@ -1975,32 +1926,42 @@ void LCodeGen::DoDateField(LDateField* instr) {
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
+ LOperand* index_op = instr->index();
Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
String::Encoding encoding = instr->encoding();
if (FLAG_debug_code) {
- __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+ __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ __ and_(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
+ __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
__ Check(eq, kUnexpectedStringType);
}
- __ add(ip,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ strb(value, MemOperand(ip, index));
+ if (index_op->IsConstantOperand()) {
+ int constant_index = ToInteger32(LConstantOperand::cast(index_op));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ strb(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
+ } else {
+ __ strh(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
+ }
} else {
- // MemOperand with ip as the base register is not allowed for strh, so
- // we do the address calculation explicitly.
- __ add(ip, ip, Operand(index, LSL, 1));
- __ strh(value, MemOperand(ip));
+ Register index = ToRegister(index_op);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ add(scratch, string, Operand(index));
+ __ strb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ } else {
+ __ add(scratch, string, Operand(index, LSL, 1));
+ __ strh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ }
}
}
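The rewrite adds a constant-index fast path and retires ip in favor of a proper scratch register. Both paths compute the same untagged byte offset into the sequential string; only the addressing mode differs. A sketch of the shared offset math, assuming the SeqString layout implied above (a header followed by 1- or 2-byte characters):

    int SeqStringCharOffsetSketch(int header_size, int index, bool one_byte) {
      // FieldMemOperand subtracts the heap-object tag separately, so this is
      // just the header size plus the scaled character index.
      return header_size + (one_byte ? index : index * 2);
    }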
@@ -2008,6 +1969,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
void LCodeGen::DoThrow(LThrow* instr) {
Register input_reg = EmitLoadRegister(instr->value(), ip);
__ push(input_reg);
+ ASSERT(ToRegister(instr->context()).is(cp));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
@@ -2145,6 +2107,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->left()).is(r1));
ASSERT(ToRegister(instr->right()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -2158,13 +2121,6 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
}
-int LCodeGen::GetNextEmittedBlock() const {
- for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
- if (!chunk_->GetLabel(i)->HasReplacement()) return i;
- }
- return -1;
-}
-
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
int left_block = instr->TrueDestination(chunk_);
@@ -2197,25 +2153,6 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, al);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, al);
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ ldr(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
- __ CompareRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, eq);
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
@@ -2371,6 +2308,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = eq;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
case Token::LT:
cond = is_unsigned ? lo : lt;
break;
@@ -2575,6 +2516,7 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2735,6 +2677,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
@@ -2844,13 +2787,14 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub stub(flags);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
// Get the temp register reserved by the instruction. This needs to be r4,
// as its safepoint-register slot is used to communicate the offset to the
// location of the map check.
Register temp = ToRegister(instr->temp());
ASSERT(temp.is(r4));
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
+ __ Move(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 5;
// Make sure that code size is predictable, since we use specific constant
// offsets in the code to find embedded values.
@@ -2879,15 +2823,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2908,8 +2845,11 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
+    // Runtime::TraceExit returns its parameter in r0. We're leaving the code
+    // managed by the register allocator and tearing down the frame, so it's
+    // safe to write to the context register.
__ push(r0);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
@@ -2953,7 +2893,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
+ __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -2964,6 +2904,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -2980,7 +2921,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register cell = scratch0();
// Load the cell.
- __ mov(cell, Operand(instr->hydrogen()->cell()));
+ __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -3001,6 +2942,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
@@ -3073,7 +3015,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (access.IsExternalMemory()) {
Register result = ToRegister(instr->result());
- __ ldr(result, MemOperand(object, offset));
+ MemOperand operand = MemOperand(object, offset);
+ if (access.representation().IsByte()) {
+ __ ldrb(result, operand);
+ } else {
+ __ ldr(result, operand);
+ }
return;
}
@@ -3084,16 +3031,21 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (access.IsInobject()) {
- __ ldr(result, FieldMemOperand(object, offset));
- } else {
+ if (!access.IsInobject()) {
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, offset));
+ object = result;
+ }
+ MemOperand operand = FieldMemOperand(object, offset);
+ if (access.representation().IsByte()) {
+ __ ldrb(result, operand);
+ } else {
+ __ ldr(result, operand);
}
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -3148,6 +3100,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@@ -3265,27 +3223,30 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
Register scratch = scratch0();
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int constant_key = 0;
+
+ int base_offset =
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ (instr->additional_index() << element_size_shift);
if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- } else {
- key = ToRegister(instr->key());
+ base_offset += constant_key << element_size_shift;
}
+ __ add(scratch, elements, Operand(base_offset));
- int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- ((constant_key + instr->additional_index()) << element_size_shift);
if (!key_is_constant) {
- __ add(elements, elements, Operand(key, LSL, shift_size));
+ key = ToRegister(instr->key());
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ add(scratch, scratch, Operand(key, LSL, shift_size));
}
- __ add(elements, elements, Operand(base_offset));
- __ vldr(result, elements, 0);
+
+ __ vldr(result, scratch, 0);
+
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
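The load now folds the array header, the additional index, and any constant key into a single base_offset, adds it into scratch once, and leaves the elements register untouched, which also lets the hole check reuse the already-computed address. A sketch of the offset computation, assuming the shift values used above (constant_key is zero when the key is in a register):

    int KeyedDoubleBaseOffsetSketch(int header_size, int heap_object_tag,
                                    int additional_index, int constant_key,
                                    int element_size_shift) {
      return header_size - heap_object_tag +
             ((additional_index + constant_key) << element_size_shift);
    }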
@@ -3305,7 +3266,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
instr->additional_index());
store_base = elements;
} else {
- Register key = EmitLoadRegister(instr->key(), scratch0());
+ Register key = ToRegister(instr->key());
// Even though the HLoadKeyed instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
@@ -3381,6 +3342,7 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->key()).is(r0));
@@ -3517,7 +3479,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is r0, as expected
@@ -3525,7 +3486,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3554,11 +3514,11 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
// If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
- for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- __ mov(result, cp);
- return;
- }
+ if (info()->IsOptimizing()) {
+ __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
}
}
@@ -3572,8 +3532,9 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
__ push(cp); // The context is the first argument.
- __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+ __ Move(scratch0(), instr->hydrogen()->pairs());
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
@@ -3582,8 +3543,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
}
@@ -3606,11 +3568,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
dont_adapt_arguments || formal_parameter_count == arity;
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
if (can_invoke_directly) {
if (r1_state == R1_UNINITIALIZED) {
- __ LoadHeapObject(r1, function);
+ __ Move(r1, function);
}
// Change context.
@@ -3636,9 +3597,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ InvokeFunction(
function, expected, count, CALL_FUNCTION, generator, call_kind);
}
-
- // Restore context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3654,6 +3612,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3697,7 +3657,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
// Restore input_reg after call to runtime.
@@ -3872,9 +3833,9 @@ void LCodeGen::DoPower(LPower* instr) {
} else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(r2, &no_deopt);
- __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, Operand(ip));
+ __ cmp(r6, Operand(ip));
DeoptimizeIf(ne, instr->environment());
__ bind(&no_deopt);
MathPowStub stub(MathPowStub::TAGGED);
@@ -3968,6 +3929,9 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3976,6 +3940,9 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3984,6 +3951,9 @@ void LCodeGen::DoMathTan(LMathTan* instr) {
void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3992,6 +3962,9 @@ void LCodeGen::DoMathCos(LMathCos* instr) {
void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3999,17 +3972,16 @@ void LCodeGen::DoMathSin(LMathSin* instr) {
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(instr->HasPointerMap());
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -4022,17 +3994,18 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallNamed(LCallNamed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
@@ -4041,23 +4014,22 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
@@ -4066,7 +4038,6 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -4082,6 +4053,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->constructor()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -4095,6 +4067,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->constructor()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -4169,7 +4142,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (access.IsExternalMemory()) {
Register value = ToRegister(instr->value());
- __ str(value, MemOperand(object, offset));
+ MemOperand operand = MemOperand(object, offset);
+ if (representation.IsByte()) {
+ __ strb(value, operand);
+ } else {
+ __ str(value, operand);
+ }
return;
}
@@ -4214,7 +4192,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
instr->hydrogen()->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
- __ str(value, FieldMemOperand(object, offset));
+ MemOperand operand = FieldMemOperand(object, offset);
+ if (representation.IsByte()) {
+ __ strb(value, operand);
+ } else {
+ __ str(value, operand);
+ }
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
@@ -4228,7 +4211,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ str(value, FieldMemOperand(scratch, offset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ if (representation.IsByte()) {
+ __ strb(value, operand);
+ } else {
+ __ str(value, operand);
+ }
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
@@ -4246,6 +4234,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
@@ -4311,16 +4300,23 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
- Operand operand(key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size));
- __ add(scratch0(), external_pointer, operand);
+ if (key_is_constant) {
+ if (constant_key != 0) {
+ __ add(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
+ } else {
+ __ add(address, external_pointer, Operand(key, LSL, shift_size));
+ }
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
- __ vstr(double_scratch0().low(), scratch0(), additional_offset);
+ __ vstr(double_scratch0().low(), address, additional_offset);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vstr(value, scratch0(), additional_offset);
+ __ vstr(value, address, additional_offset);
}
} else {
Register value(ToRegister(instr->value()));
@@ -4362,32 +4358,28 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
- Register key = no_reg;
Register scratch = scratch0();
+ DwVfpRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
// Calculate the effective address of the slot in the array to store the
// double value.
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
+ __ add(scratch, elements,
+ Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- Operand operand = key_is_constant
- ? Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(scratch, elements, operand);
- if (!key_is_constant) {
- __ add(scratch, scratch,
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ add(scratch, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, scratch,
+ Operand(ToRegister(instr->key()), LSL, shift_size));
}
if (instr->NeedsCanonicalization()) {
@@ -4397,9 +4389,12 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
__ Assert(ne, kDefaultNaNModeNotSet);
}
- __ VFPCanonicalizeNaN(value);
+ __ VFPCanonicalizeNaN(double_scratch, value);
+ __ vstr(double_scratch, scratch,
+ instr->additional_index() << element_size_shift);
+ } else {
+ __ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
- __ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
@@ -4463,6 +4458,7 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r2));
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
@@ -4496,6 +4492,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, GetLinkRegisterState(), kDontSaveFPRegs);
} else {
+ ASSERT(ToRegister(instr->context()).is(cp));
PushSafepointRegistersScope scope(
this, Safepoint::kWithRegistersAndDoubles);
__ Move(r0, object_reg);
@@ -4512,12 +4509,15 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationMemento(object, temp);
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(eq, instr->environment());
+ __ bind(&no_memento_found);
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
StringAddStub stub(instr->hydrogen()->flags());
@@ -4573,7 +4573,8 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ instr->context());
__ AssertSmi(r0);
__ SmiUntag(r0);
__ StoreToSafepointRegisterSlot(r0, result);
@@ -4625,7 +4626,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -4649,9 +4650,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
LOperand* input = instr->value();
- ASSERT(input->IsRegister());
LOperand* output = instr->result();
- ASSERT(output->IsRegister());
__ SmiTag(ToRegister(output), ToRegister(input), SetCC);
if (!instr->hydrogen()->value()->HasRange() ||
!instr->hydrogen()->value()->range()->IsInSmiRange()) {
@@ -4670,6 +4669,18 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ tst(ToRegister(input), Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment());
+ }
+ __ SmiTag(ToRegister(output), ToRegister(input));
+}
+
+
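DoUint32ToSmi only deoptimizes when range analysis cannot prove the value fits, and the 0xc0000000 mask is the whole check: on 32-bit V8 a smi carries a signed 31-bit payload, so an unsigned value is representable only when it is below 2^30, i.e. when its top two bits are clear. A sketch of the predicate, assuming the 32-bit smi layout with one tag bit:

    #include <stdint.h>

    bool Uint32FitsInSmiSketch(uint32_t value) {
      return (value & 0xC0000000u) == 0;  // tst input, #0xc0000000; deopt if ne
    }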
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
@@ -4764,7 +4775,15 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// integer value.
__ mov(ip, Operand::Zero());
__ StoreToSafepointRegisterSlot(ip, dst);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Move(dst, r0);
__ sub(dst, dst, Operand(kHeapObjectTag));
@@ -4820,7 +4839,15 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ mov(reg, Operand::Zero());
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ sub(r0, r0, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(r0, reg);
}
@@ -4855,36 +4882,20 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
-
- Label load_smi, heap_number, done;
-
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
// Heap number map check.
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(ne, env);
+ if (can_convert_undefined_to_nan) {
+ __ b(ne, &convert);
} else {
- Label heap_number, convert;
- __ b(eq, &heap_number);
-
- // Convert undefined (and hole) to NaN.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
DeoptimizeIf(ne, env);
-
- __ bind(&convert);
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
- __ jmp(&done);
-
- __ bind(&heap_number);
}
- // Heap number to double register conversion.
+    // Load the heap number.
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
if (deoptimize_on_minus_zero) {
__ VmovLow(scratch, result_reg);
@@ -4895,11 +4906,20 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(eq, env);
}
__ jmp(&done);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(input_reg, Operand(ip));
+ DeoptimizeIf(ne, env);
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
+ __ jmp(&done);
+ }
} else {
__ SmiUntag(scratch, input_reg);
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
-
// Smi to double register conversion
__ bind(&load_smi);
// scratch: untagged value of input_reg
@@ -4935,18 +4955,33 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
if (instr->truncating()) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
- Label heap_number;
- __ b(eq, &heap_number);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ Label no_heap_number, check_bools, check_false;
+ __ b(ne, &no_heap_number);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ b(&done);
+
+    // Check for oddballs. Undefined/False are converted to zero and True to one
+ // for truncating conversions.
+ __ bind(&no_heap_number);
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(scratch2, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ __ b(ne, &check_bools);
__ mov(input_reg, Operand::Zero());
__ b(&done);
- __ bind(&heap_number);
- __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ bind(&check_bools);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(scratch2, Operand(ip));
+ __ b(ne, &check_false);
+ __ mov(input_reg, Operand(1));
+ __ b(&done);
+
+ __ bind(&check_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(scratch2, Operand(ip));
+ DeoptimizeIf(ne, instr->environment());
+ __ mov(input_reg, Operand::Zero());
+ __ b(&done);
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment());
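For truncating conversions the deferred code now accepts oddballs instead of deoptimizing on anything that is not a heap number: undefined and false become zero, true becomes one, and everything else still deoptimizes. A sketch of that mapping, with an illustrative enum standing in for the root-list comparisons in the assembly:

    #include <stdint.h>

    enum OddballSketch { kUndefinedSketch, kTrueSketch, kFalseSketch, kOtherSketch };

    bool TruncateOddballSketch(OddballSketch v, int32_t* out) {
      switch (v) {
        case kUndefinedSketch: case kFalseSketch: *out = 0; return true;
        case kTrueSketch:                         *out = 1; return true;
        default:                                  return false;  // deoptimize
      }
    }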
@@ -4987,15 +5022,19 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- // Optimistically untag the input.
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(input_reg, SetCC);
- // Branch to deferred code if the input was tagged.
- // The deferred code will take care of restoring the tag.
- __ b(cs, deferred->entry());
- __ bind(deferred->exit());
+ // Optimistically untag the input.
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
+ __ SmiUntag(input_reg, SetCC);
+ // Branch to deferred code if the input was tagged.
+ // The deferred code will take care of restoring the tag.
+ __ b(cs, deferred->entry());
+ __ bind(deferred->exit());
+ }
}
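DoTaggedToI can now skip the deferred path entirely when the hydrogen value is already known to be a smi. Otherwise it keeps the optimistic scheme: SmiUntag with SetCC shifts the low-order tag bit into the carry flag, so a set carry means the input was a heap object and control branches to the deferred code, which restores the tag. A sketch of the trick, assuming the usual smi tag of 0 and heap-object tag of 1 in the low bit:

    #include <stdint.h>

    // Returns true if the value was a smi; *out then holds the untagged int.
    bool OptimisticUntagSketch(uint32_t tagged, int32_t* out) {
      *out = static_cast<int32_t>(tagged) >> 1;  // SmiUntag (asr #1, SetCC)
      return (tagged & 1) == 0;                  // carry clear, so it was a smi
    }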
@@ -5133,7 +5172,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
@@ -5152,7 +5191,10 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(object);
- CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+ __ mov(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
@@ -5185,7 +5227,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
__ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
@@ -5194,14 +5235,15 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
__ CompareMap(map_reg, map, &success);
__ b(eq, &success);
}
- Handle<Map> map = map_set->last();
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
__ CompareMap(map_reg, map, &success);
if (instr->hydrogen()->has_migration_target()) {
__ b(ne, deferred->entry());
@@ -5355,12 +5397,15 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
+ instr->context());
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
+ instr->context());
} else {
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
+ instr->context());
}
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -5374,26 +5419,27 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Label materialized;
// Registers will be used as follows:
- // r7 = literals array.
+ // r6 = literals array.
// r1 = regexp literal.
// r0 = regexp literal clone.
- // r2 and r4-r6 are used as temporaries.
+  // r2-r5 are used as temporaries.
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(r7, instr->hydrogen()->literals());
- __ ldr(r1, FieldMemOperand(r7, literal_offset));
+ __ Move(r6, instr->hydrogen()->literals());
+ __ ldr(r1, FieldMemOperand(r6, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r1, ip);
__ b(ne, &materialized);
// Create regexp literal using runtime function
// Result will be in r0.
- __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r5, Operand(instr->hydrogen()->pattern()));
- __ mov(r4, Operand(instr->hydrogen()->flags()));
- __ Push(r7, r6, r5, r4);
+ __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ mov(r4, Operand(instr->hydrogen()->pattern()));
+ __ mov(r3, Operand(instr->hydrogen()->flags()));
+ __ Push(r6, r5, r4, r3);
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
__ mov(r1, r0);
@@ -5417,6 +5463,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
@@ -5560,16 +5607,15 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
// Block literal pool emission for duration of padding.
Assembler::BlockConstPoolScope block_const_pool(masm());
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
__ nop();
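
The padding logic is easiest to see with concrete numbers; a standalone sketch with assumed offsets (only kInstrSize matches real ARM, the other constants are illustrative):

    #include <cassert>
    #include <cstdio>

    int main() {
      const int kInstrSize = 4;      // ARM instruction width
      int last_lazy_deopt_pc = 96;   // assumed previous patch point
      int space_needed = 12;         // e.g. Deoptimizer::patch_size()
      int current_pc = 100;          // assumed masm()->pc_offset()
      if (current_pc < last_lazy_deopt_pc + space_needed) {
        int padding_size = last_lazy_deopt_pc + space_needed - current_pc;
        assert(padding_size % kInstrSize == 0);
        while (padding_size > 0) {
          std::printf("nop\n");      // stands in for __ nop()
          padding_size -= kInstrSize;
        }
      }
      return 0;
    }
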
@@ -5580,7 +5626,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5611,6 +5657,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5644,10 +5691,12 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(hs, &done);
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
CallCode(isolate()->builtins()->StackCheck(),
- RelocInfo::CODE_TARGET,
- instr);
- EnsureSpaceForLazyDeopt();
+ RelocInfo::CODE_TARGET,
+ instr);
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5660,7 +5709,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 4b6b5ca8e3..a9b85c89cc 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -32,6 +32,7 @@
#include "arm/lithium-gap-resolver-arm.h"
#include "deoptimizer.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -43,43 +44,26 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen V8_FINAL BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -178,30 +162,15 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk()->graph(); }
Register scratch0() { return r9; }
LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }
- int GetNextEmittedBlock() const;
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
@@ -214,14 +183,12 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
- void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
@@ -249,7 +216,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -258,9 +226,11 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
CallRuntime(function, num_arguments, instr);
}
+ void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ LOperand* context);
enum R1State {
R1_UNINITIALIZED,
@@ -276,8 +246,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
CallKind call_kind,
R1State r1_state);
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -320,8 +288,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
+
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -383,7 +351,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Register scratch,
LEnvironment* environment);
- void EnsureSpaceForLazyDeopt();
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -391,24 +359,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -420,8 +378,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
index 88ac7a2a21..0c6b2adadf 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -252,7 +252,7 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsInteger32(constant_source)) {
__ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ __ Move(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DwVfpRegister result = cgen_->ToDoubleRegister(destination);
@@ -267,7 +267,7 @@ void LGapResolver::EmitMove(int index) {
__ mov(kSavedValueRegister,
Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(kSavedValueRegister,
+ __ Move(kSavedValueRegister,
cgen_->ToHandle(constant_source));
}
__ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 7df785776d..d8771cb702 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -35,6 +35,7 @@
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
namespace v8 {
@@ -233,7 +234,19 @@ void MacroAssembler::Push(Handle<Object> handle) {
void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, Operand(value));
+ AllowDeferredHandleDereference smi_check;
+ if (value->IsSmi()) {
+ mov(dst, Operand(value));
+ } else {
+ ASSERT(value->IsHeapObject());
+ if (isolate()->heap()->InNewSpace(*value)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(value);
+ mov(dst, Operand(cell));
+ ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
+ } else {
+ mov(dst, Operand(value));
+ }
+ }
}
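
What the new Move() body decides, as a standalone sketch (the enum and the printed pseudo-assembly are illustrative): smis encode their value in the word itself and old-space objects do not move, so both can be embedded directly, while a new-space object may be moved by the GC, so the code embeds an old-space Cell and loads the current value through it.

    #include <cstdio>

    enum class ValueKind { kSmi, kOldSpaceObject, kNewSpaceObject };

    void EmitMove(ValueKind value) {
      switch (value) {
        case ValueKind::kSmi:
        case ValueKind::kOldSpaceObject:
          std::printf("mov dst, #value\n");   // the immediate never moves
          break;
        case ValueKind::kNewSpaceObject:
          std::printf("mov dst, #cell\n");    // the Cell lives in old space
          std::printf("ldr dst, [dst, #kValueOffset]\n");  // load current value
          break;
      }
    }

    int main() { EmitMove(ValueKind::kNewSpaceObject); return 0; }
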
@@ -394,19 +407,6 @@ void MacroAssembler::StoreRoot(Register source,
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- mov(result, Operand(cell));
- ldr(result, FieldMemOperand(result, Cell::kValueOffset));
- } else {
- mov(result, Operand(object));
- }
-}
-
-
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@@ -478,11 +478,6 @@ void MacroAssembler::RecordWrite(Register object,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
-
if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
@@ -733,9 +728,11 @@ void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
bind(&fpscr_done);
}
-void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value,
+
+void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
+ const DwVfpRegister src,
const Condition cond) {
- vsub(value, value, kDoubleRegZero, cond);
+ vsub(dst, src, kDoubleRegZero, cond);
}
@@ -919,6 +916,33 @@ void MacroAssembler::LoadNumberAsInt32(Register object,
}
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ add(fp, sp, Operand(2 * kPointerSize));
+ } else {
+ PredictableCodeSizeScope predictible_code_size_scope(
+ this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ add(r0, pc, Operand(-8));
+ ldr(pc, MemOperand(pc, -4));
+ dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ } else {
+ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ nop(ip.code());
+ // Adjust FP to point to saved FP.
+ add(fp, sp, Operand(2 * kPointerSize));
+ }
+ }
+}
+
+
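
A sketch of the three prologue shapes this helper emits, with printf standing in for the assembler (constants illustrative): the pre-aged sequence is kept the same length as the young one so the code-age patcher can rewrite either in place.

    #include <cstdio>

    enum class FrameMode { kStub, kFunctionYoung, kFunctionPreAged };

    void EmitPrologue(FrameMode mode) {
      switch (mode) {
        case FrameMode::kStub:
          std::printf("stm sp!, {cp, fp, lr} ; push STUB ; add fp, sp, #8\n");
          break;
        case FrameMode::kFunctionYoung:   // the nop keeps the length fixed
          std::printf("stm sp!, {r1, cp, fp, lr} ; nop ; add fp, sp, #8\n");
          break;
        case FrameMode::kFunctionPreAged: // jump into the code-age stub
          std::printf("add r0, pc, #-8 ; ldr pc, [pc, #-4] ; .word stub\n");
          break;
      }
    }

    int main() { EmitPrologue(FrameMode::kFunctionPreAged); return 0; }
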
void MacroAssembler::EnterFrame(StackFrame::Type type) {
// r0-r3: preserved
stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
@@ -1020,7 +1044,8 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count) {
+ Register argument_count,
+ bool restore_context) {
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@@ -1035,10 +1060,14 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
+
// Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- ldr(cp, MemOperand(ip));
+ if (restore_context) {
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ ldr(cp, MemOperand(ip));
+ }
#ifdef DEBUG
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
str(r3, MemOperand(ip));
#endif
@@ -1256,7 +1285,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- LoadHeapObject(r1, function);
+ Move(r1, function);
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
@@ -1330,7 +1359,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
- // For the JSEntry handler, we must preserve r0-r4, r5-r7 are available.
+ // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
// We will build up the handler from the bottom by pushing on the stack.
// Set up the code object (r5) and the state (r6) for pushing.
unsigned state =
@@ -1341,9 +1370,9 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
- mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
+ mov(cp, Operand(Smi::FromInt(0))); // Indicates no context.
mov(ip, Operand::Zero()); // NULL frame pointer.
- stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+ stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
} else {
stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
}
@@ -2280,12 +2309,14 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
}
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- Address function_address,
- ExternalReference thunk_ref,
- Register thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
@@ -2296,13 +2327,15 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ExternalReference::handle_scope_level_address(isolate()),
next_address);
+ ASSERT(!thunk_last_arg.is(r3));
+
// Allocate HandleScope in callee-save registers.
- mov(r7, Operand(next_address));
- ldr(r4, MemOperand(r7, kNextOffset));
- ldr(r5, MemOperand(r7, kLimitOffset));
- ldr(r6, MemOperand(r7, kLevelOffset));
+ mov(r9, Operand(next_address));
+ ldr(r4, MemOperand(r9, kNextOffset));
+ ldr(r5, MemOperand(r9, kLimitOffset));
+ ldr(r6, MemOperand(r9, kLevelOffset));
add(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
+ str(r6, MemOperand(r9, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(this, StackFrame::MANUAL);
@@ -2313,7 +2346,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
PopSafepointRegisters();
}
- ASSERT(!thunk_last_arg.is(r3));
Label profiler_disabled;
Label end_profiler_check;
bool* is_profiling_flag =
@@ -2349,24 +2381,25 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
}
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
// load value from ReturnValue
- ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
+ ldr(r0, return_value_operand);
bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
- str(r4, MemOperand(r7, kNextOffset));
+ str(r4, MemOperand(r9, kNextOffset));
if (emit_debug_code()) {
- ldr(r1, MemOperand(r7, kLevelOffset));
+ ldr(r1, MemOperand(r9, kLevelOffset));
cmp(r1, r6);
Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
}
sub(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
- ldr(ip, MemOperand(r7, kLimitOffset));
+ str(r6, MemOperand(r9, kLevelOffset));
+ ldr(ip, MemOperand(r9, kLimitOffset));
cmp(r5, ip);
b(ne, &delete_allocated_handles);
@@ -2377,21 +2410,29 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ldr(r5, MemOperand(ip));
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
+ bind(&exception_handled);
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ ldr(cp, *context_restore_operand);
+ }
// LeaveExitFrame expects unwind space to be in a register.
mov(r4, Operand(stack_space));
- LeaveExitFrame(false, r4);
+ LeaveExitFrame(false, r4, !restore_context);
mov(pc, lr);
bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
- str(r5, MemOperand(r7, kLimitOffset));
+ str(r5, MemOperand(r9, kLimitOffset));
mov(r4, r0);
PrepareCallCFunction(1, r5);
mov(r0, Operand(ExternalReference::isolate_address(isolate())));
@@ -2603,7 +2644,8 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// All parameters are on the stack. r0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -2620,21 +2662,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
mov(r0, Operand(num_arguments));
mov(r1, Operand(ExternalReference(f, isolate())));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- mov(r0, Operand(function->nargs));
- mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1, kSaveFPRegs);
+ CEntryStub stub(1, save_doubles);
CallStub(&stub);
}
@@ -3079,6 +3107,88 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
+ sub(mask, mask, Operand(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ add(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+ eor(scratch1, scratch1, Operand(scratch2));
+ and_(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ add(scratch1,
+ number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ sub(scratch2, object, Operand(kHeapObjectTag));
+ vldr(d0, scratch2, HeapNumber::kValueOffset);
+ sub(probe, probe, Operand(kHeapObjectTag));
+ vldr(d1, probe, HeapNumber::kValueOffset);
+ VFPCompareAndSetFlags(d0, d1);
+ b(ne, not_found); // The cache did not contain this value.
+ b(&load_result_from_cache);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+ and_(scratch, mask, Operand(object, ASR, 1));
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ add(scratch,
+ number_string_cache,
+ Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ cmp(object, probe);
+ b(ne, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
+
+
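
The cache hash described in the comments above, as a standalone sketch (untagged cache length for simplicity; the real code shifts the smi-tagged length instead): doubles hash to the xor of their upper and lower 32-bit words, masked by half the cache length minus one.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    uint32_t NumberStringCacheIndex(double number, uint32_t cache_length) {
      uint32_t mask = cache_length / 2 - 1;      // two slots per entry
      uint64_t bits;
      std::memcpy(&bits, &number, sizeof bits);  // aliasing-safe type pun
      uint32_t hash = static_cast<uint32_t>(bits) ^
                      static_cast<uint32_t>(bits >> 32);
      return hash & mask;                        // (smis hash to value & mask)
    }

    int main() {
      std::printf("entry %u\n", NumberStringCacheIndex(3.14, 128));
      return 0;
    }
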
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@@ -3191,20 +3301,19 @@ void MacroAssembler::CopyBytes(Register src,
Register dst,
Register length,
Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+ Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
- bind(&align_loop);
- cmp(length, Operand::Zero());
- b(eq, &done);
+ cmp(length, Operand(kPointerSize));
+ b(le, &byte_loop);
+
bind(&align_loop_1);
tst(src, Operand(kPointerSize - 1));
b(eq, &word_loop);
ldrb(scratch, MemOperand(src, 1, PostIndex));
strb(scratch, MemOperand(dst, 1, PostIndex));
sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
-
+ b(&align_loop_1);
// Copy bytes in word size chunks.
bind(&word_loop);
if (emit_debug_code()) {
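
The restructured copy loop in plain C++ (a behavioural sketch, assuming a 4-byte word): lengths of at most one word go straight to the byte loop; everything else byte-copies until src is aligned, then proceeds word at a time.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void CopyBytes(const uint8_t* src, uint8_t* dst, std::size_t length) {
      const std::size_t kPointerSize = 4;  // assumed ARM word size
      if (length > kPointerSize) {
        // Byte-copy until src is word-aligned (align_loop_1).
        while (reinterpret_cast<uintptr_t>(src) & (kPointerSize - 1)) {
          *dst++ = *src++;
          --length;
        }
        // Copy the aligned middle in word-sized chunks (word_loop).
        while (length >= kPointerSize) {
          std::memcpy(dst, src, kPointerSize);
          src += kPointerSize;
          dst += kPointerSize;
          length -= kPointerSize;
        }
      }
      // Copy whatever remains a byte at a time (byte_loop).
      while (length > 0) {
        *dst++ = *src++;
        --length;
      }
    }
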
@@ -3776,8 +3885,8 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
- Register scratch_reg) {
- Label no_memento_available;
+ Register scratch_reg,
+ Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
@@ -3785,15 +3894,14 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
add(scratch_reg, receiver_reg,
Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Operand(new_space_start));
- b(lt, &no_memento_available);
+ b(lt, no_memento_found);
mov(ip, Operand(new_space_allocation_top));
ldr(ip, MemOperand(ip));
cmp(scratch_reg, ip);
- b(gt, &no_memento_available);
+ b(gt, no_memento_found);
ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
cmp(scratch_reg,
- Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
- bind(&no_memento_available);
+ Operand(isolate()->factory()->allocation_memento_map()));
}
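
A sketch of the memento probe with hypothetical sizes and a caller-supplied map loader: a memento, when present, sits immediately after the JSArray in new space, so the code range-checks that address against the new-space bounds and then compares the map word stored there.

    #include <cstdint>

    bool HasAllocationMemento(uintptr_t receiver, uintptr_t new_space_start,
                              uintptr_t allocation_top, uintptr_t memento_map,
                              uintptr_t (*load_map_at)(uintptr_t)) {
      const uintptr_t kJSArraySize = 16;  // assumed JSArray::kSize
      const uintptr_t kMementoSize = 8;   // assumed AllocationMemento::kSize
      uintptr_t end = receiver + kJSArraySize + kMementoSize;
      if (end < new_space_start) return false;  // b(lt, no_memento_found)
      if (end > allocation_top) return false;   // b(gt, no_memento_found)
      // ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize))
      return load_map_at(end - kMementoSize) == memento_map;
    }
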
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 9abd5a0c3d..32471443bb 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -45,8 +45,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
// Give alias names to registers
-const Register cp = { 8 }; // JavaScript context pointer
-const Register kRootRegister = { 10 }; // Roots array pointer.
+const Register pp = { kRegister_r7_Code }; // Constant pool pointer.
+const Register cp = { kRegister_r8_Code }; // JavaScript context pointer.
+const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
// Flags used for AllocateHeapNumber
enum TaggingMode {
@@ -169,17 +170,6 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
- void LoadHeapObject(Register dst, Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, object);
- }
- }
-
// ---------------------------------------------------------------------------
// GC Support
@@ -469,8 +459,13 @@ class MacroAssembler: public Assembler {
void VFPEnsureFPSCRState(Register scratch);
// If the value is a NaN, canonicalize the value else, do nothing.
- void VFPCanonicalizeNaN(const DwVfpRegister value,
+ void VFPCanonicalizeNaN(const DwVfpRegister dst,
+ const DwVfpRegister src,
const Condition cond = al);
+ void VFPCanonicalizeNaN(const DwVfpRegister value,
+ const Condition cond = al) {
+ VFPCanonicalizeNaN(value, value, cond);
+ }
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
@@ -533,6 +528,8 @@ class MacroAssembler: public Assembler {
LowDwVfpRegister double_scratch1,
Label* not_int32);
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@@ -541,7 +538,9 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count);
+ void LeaveExitFrame(bool save_doubles,
+ Register argument_count,
+ bool restore_context);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -1037,11 +1036,18 @@ class MacroAssembler: public Assembler {
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ }
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1111,7 +1117,8 @@ class MacroAssembler: public Assembler {
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- int return_value_offset_from_fp);
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
@@ -1286,6 +1293,18 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the object register is found in the cache, the generated code falls
+  // through with the result in the result register. The object and result
+  // registers can be the same. If the number is not found in the cache, the
+  // code jumps to the not_found label; only the object register is unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
// Checks if both objects are sequential ASCII strings and jumps to label
// if either is not. Assumes that neither object is a smi.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
@@ -1360,9 +1379,20 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
- // If allocation info is present, condition flags are set to eq
+ // If allocation info is present, condition flags are set to eq.
void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg);
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ b(eq, memento_found);
+ bind(&no_memento_found);
+ }
private:
void CallCFunctionHelper(Register function,
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index 9f07489e1f..8d9d515c76 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -223,11 +223,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// are always 0..num_saved_registers_-1)
int num_saved_registers_;
- // Manage a small pre-allocated pool for writing label targets
- // to for pushing backtrack addresses.
- int backtrack_constant_pool_offset_;
- int backtrack_constant_pool_capacity_;
-
// Labels used internally.
Label entry_label_;
Label start_label_;
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index def1818630..461d032b99 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -912,6 +912,12 @@ double Simulator::get_double_from_register_pair(int reg) {
}
+void Simulator::set_register_pair_from_double(int reg, double* value) {
+ ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
+ memcpy(registers_ + reg, value, sizeof(*value));
+}
+
+
void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
@@ -1026,27 +1032,22 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
}
-// Runtime FP routines take up to two double arguments and zero
-// or one integer arguments. All are consructed here.
-// from r0-r3 or d0 and d1.
+// Runtime FP routines take:
+// - two double arguments
+// - one double argument and zero or one integer argument.
+// All are constructed here from r0-r3 or d0, d1 and r0.
void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- *y = vfp_registers_[1];
- *z = registers_[1];
+ *x = get_double_from_d_register(0);
+ *y = get_double_from_d_register(1);
+ *z = get_register(0);
} else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
- OS::MemCopy(buffer, registers_, sizeof(*x));
- OS::MemCopy(x, buffer, sizeof(*x));
+ *x = get_double_from_register_pair(0);
// Register 2 and 3 -> y.
- OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
- OS::MemCopy(y, buffer, sizeof(*y));
+ *y = get_double_from_register_pair(2);
// Register 2 -> z
- memcpy(buffer, registers_ + 2, sizeof(*z));
- memcpy(z, buffer, sizeof(*z));
+ *z = get_register(2);
}
}
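
What get_double_from_register_pair() boils down to for the soft-float path, as a standalone sketch: reassemble the double from two consecutive 32-bit core registers with memcpy, the aliasing-safe replacement for the old char-buffer dance.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    double DoubleFromRegisterPair(const int32_t regs[2]) {
      double value;
      std::memcpy(&value, regs, sizeof value);  // r0:r1 hold the raw bits
      return value;
    }

    int main() {
      int32_t regs[2];
      double d = 1.5;
      std::memcpy(regs, &d, sizeof d);          // simulate r0:r1 after a call
      std::printf("%f\n", DoubleFromRegisterPair(regs));  // prints 1.500000
      return 0;
    }
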
@@ -1718,32 +1719,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
- if (use_eabi_hardfloat()) {
- // With the hard floating point calling convention, double
- // arguments are passed in VFP registers. Fetch the arguments
- // from there and call the builtin using soft floating point
- // convention.
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- arg2 = vfp_registers_[2];
- arg3 = vfp_registers_[3];
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- arg2 = get_register(0);
- break;
- default:
- break;
- }
- }
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
@@ -3816,19 +3791,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
-double Simulator::CallFP(byte* entry, double d0, double d1) {
+void Simulator::CallFP(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
} else {
- int buffer[2];
- ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- OS::MemCopy(buffer, &d0, sizeof(d0));
- set_dw_register(0, buffer);
- OS::MemCopy(buffer, &d1, sizeof(d1));
- set_dw_register(2, buffer);
+ set_register_pair_from_double(0, &d0);
+ set_register_pair_from_double(2, &d1);
}
CallInternal(entry);
+}
+
+
+int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
+ int32_t result = get_register(r0);
+ return result;
+}
+
+
+double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
if (use_eabi_hardfloat()) {
return get_double_from_d_register(0);
} else {
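
The shape of the refactoring, sketched with a dummy simulator (the CallFP body here just fabricates a result; only the call/return plumbing is the point): CallFP now only marshals arguments, and the two typed helpers fetch the result from wherever the ABI actually leaves it.

    #include <cstdint>

    struct SimSketch {
      int32_t r0 = 0;
      double d0 = 0.0;
      // Stand-in for running generated code at `entry`.
      void CallFP(double a, double b) {
        d0 = a + b;                     // pretend the result lands in d0
        r0 = static_cast<int32_t>(d0);  // and is mirrored in r0
      }
      int32_t CallFPReturnsInt(double a, double b) {
        CallFP(a, b);
        return r0;                      // get_register(r0)
      }
      double CallFPReturnsDouble(double a, double b) {
        CallFP(a, b);
        return d0;                      // get_double_from_d_register(0)
      }
    };
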
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 7fca7432bf..e392c5cb36 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -163,6 +163,7 @@ class Simulator {
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
+ void set_register_pair_from_double(int reg, double* value);
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
@@ -220,7 +221,9 @@ class Simulator {
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
- double CallFP(byte* entry, double d0, double d1);
+ void CallFP(byte* entry, double d0, double d1);
+ int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+ double CallFPReturnsDouble(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -444,6 +447,10 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
+#define CALL_GENERATED_FP_INT(entry, p0, p1) \
+ Simulator::current(Isolate::Current())->CallFPReturnsInt( \
+ FUNCTION_ADDR(entry), p0, p1)
+
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 085af3f2b7..004e067c82 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -380,31 +380,27 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
__ Ret();
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+ __ b(ne, miss);
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
- }
+ // Unwrap the value and check if the wrapped value is a string.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+ __ Ret();
}
@@ -437,7 +433,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -457,19 +453,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
-void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss_label,
- Label* slow) {
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
// r0 : value
Label exit;
@@ -481,7 +477,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
- __ LoadObject(scratch1, constant);
+ __ Move(scratch1, constant);
__ cmp(value_reg, scratch1);
__ b(ne, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
@@ -621,15 +617,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
-void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// r0 : value
Label exit;
@@ -740,9 +736,9 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ mov(this->name(), Operand(name));
@@ -843,25 +839,26 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : isolate
- // -- sp[16] : ReturnValue default value
- // -- sp[20] : ReturnValue
- // -- sp[24] : last JS argument
+ // -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
+ // : holder (set by CheckPrototypes)
+ // -- sp[28] : last JS argument
// -- ...
- // -- sp[(argc + 5) * 4] : first JS argument
- // -- sp[(argc + 6) * 4] : receiver
+ // -- sp[(argc + 6) * 4] : first JS argument
+ // -- sp[(argc + 7) * 4] : receiver
// -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+ // Save calling context.
+ __ str(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(r5, function);
+ __ Move(r5, function);
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
+ __ str(r5, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
- // Pass the additional arguments.
+ // Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
@@ -870,15 +867,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
} else {
__ Move(r6, call_data);
}
- __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
- // Store JS function, call data, isolate ReturnValue default and ReturnValue.
- __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
+ // Store call data.
+ __ str(r6, MemOperand(sp, FCA::kDataIndex * kPointerSize));
+ // Store isolate.
+ __ mov(r5, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ str(r5, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
+ // Store ReturnValue default and ReturnValue.
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ str(r5, MemOperand(sp, 4 * kPointerSize));
- __ str(r5, MemOperand(sp, 5 * kPointerSize));
+ __ str(r5, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
+ __ str(r5, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
// Prepare arguments.
- __ add(r2, sp, Operand(5 * kPointerSize));
+ __ mov(r2, sp);
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -887,18 +887,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // r0 = v8::Arguments&
+ // r0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));
- // v8::Arguments::implicit_args_
+ // FunctionCallbackInfo::implicit_args_
__ str(r2, MemOperand(r0, 0 * kPointerSize));
- // v8::Arguments::values_
- __ add(ip, r2, Operand(argc * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ add(ip, r2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize));
- // v8::Arguments::length_ = argc
+ // FunctionCallbackInfo::length_ = argc
__ mov(ip, Operand(argc));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
- // v8::Arguments::is_construct_call = 0
+ // FunctionCallbackInfo::is_construct_call = 0
__ mov(ip, Operand::Zero());
__ str(ip, MemOperand(r0, 3 * kPointerSize));
@@ -916,12 +916,19 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ MemOperand return_value_operand(fp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
+
__ CallApiFunctionAndReturn(ref,
function_address,
thunk_ref,
r1,
kStackUnwindSpace,
- kFastApiCallArguments + 1);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
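
The slot layout those str instructions assume, sketched as plain offsets (index values are inferred from this patch's stores and its sp[0]-sp[24] comment, so treat them as illustrative):

    #include <cstdio>

    enum FCAIndex {  // one pointer-sized slot each, at sp[index * 4]
      kHolderIndex = 0,
      kIsolateIndex = 1,
      kReturnValueDefaultValueIndex = 2,
      kReturnValueOffset = 3,
      kDataIndex = 4,
      kCalleeIndex = 5,
      kContextSaveIndex = 6,
      kArgsLength = 7    // seven slots: sp[0] through sp[24]
    };

    int main() {
      std::printf("holder at sp[%d], saved context at sp[%d]\n",
                  kHolderIndex * 4, kContextSaveIndex * 4);
      return 0;
    }
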
@@ -935,11 +942,12 @@ static void GenerateFastApiCall(MacroAssembler* masm,
ASSERT(optimization.is_simple_api_call());
ASSERT(!receiver.is(scratch));
+ typedef FunctionCallbackArguments FCA;
const int stack_space = kFastApiCallArguments + argc + 1;
// Assign stack space for the call arguments.
__ sub(sp, sp, Operand(stack_space * kPointerSize));
// Write holder to stack frame.
- __ str(receiver, MemOperand(sp, 0));
+ __ str(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
// Write receiver to stack frame.
int index = stack_space - 1;
__ str(receiver, MemOperand(sp, index * kPointerSize));
@@ -950,7 +958,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ str(receiver, MemOperand(sp, index-- * kPointerSize));
}
- GenerateFastApiDirectCall(masm, optimization, argc);
+ GenerateFastApiDirectCall(masm, optimization, argc, true);
}
@@ -1064,7 +1072,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiDirectCall(
+ masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -1202,8 +1211,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
+ typedef FunctionCallbackArguments FCA;
if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
+ __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Check the maps in the prototype chain.
@@ -1262,7 +1272,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
+ __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Go to the next object in the prototype chain.
@@ -1294,9 +1304,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
if (!miss->is_unused()) {
__ b(success);
__ bind(miss);
@@ -1305,9 +1315,9 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
-void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
if (!miss->is_unused()) {
__ b(success);
GenerateRestoreName(masm(), miss, name);
@@ -1316,7 +1326,7 @@ void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
+Register LoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -1363,7 +1373,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
}
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
+void LoadStubCompiler::NonexistentHandlerFrontend(
Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
@@ -1383,10 +1393,10 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
}
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
@@ -1402,36 +1412,36 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadObject(r0, value);
+ __ Move(r0, value);
__ Ret();
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 0, NULL);
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
ASSERT(!scratch2().is(reg));
ASSERT(!scratch3().is(reg));
ASSERT(!scratch4().is(reg));
__ push(receiver());
- __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
__ Move(scratch3(), callback);
__ ldr(scratch3(), FieldMemOperand(scratch3(),
@@ -1445,19 +1455,21 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ Push(scratch3(), scratch4());
__ mov(scratch4(),
Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch4(), reg, name());
+ __ Push(scratch4(), reg);
+ __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_
+ __ push(name());
__ mov(r0, sp); // r0 = Handle<Name>
const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // Create AccessorInfo instance on the stack above the exit frame with
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
__ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
- const int kStackUnwindSpace = kFastApiCallArguments + 1;
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
@@ -1475,11 +1487,12 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_ref,
r2,
kStackUnwindSpace,
- 6);
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
}
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
+void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
Handle<JSObject> object,
Handle<JSObject> interceptor_holder,
@@ -1839,15 +1852,15 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
- __ CheckFastObjectElements(r3, r7, &not_fast_object);
+ __ CheckFastObjectElements(r3, r9, &not_fast_object);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiElements(r3, r7, &call_builtin);
+ __ CheckFastSmiElements(r3, r9, &call_builtin);
- __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, ip);
+ __ cmp(r9, ip);
__ b(eq, &call_builtin);
// edx: receiver
// r3: map
@@ -1855,7 +1868,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r3,
- r7,
+ r9,
&try_holey_map);
__ mov(r2, receiver);
ElementsTransitionGenerator::
@@ -1868,7 +1881,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
r3,
- r7,
+ r9,
&call_builtin);
__ mov(r2, receiver);
ElementsTransitionGenerator::
@@ -1901,7 +1914,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ bind(&attempt_to_grow_elements);
// r0: array's length + 1.
- // r4: elements' length.
if (!FLAG_inline_new) {
__ b(&call_builtin);
@@ -1912,8 +1924,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(r2, &no_fast_elements_check);
- __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r7, r7, &call_builtin);
+ __ ldr(r9, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(r9, r9, &call_builtin);
__ bind(&no_fast_elements_check);
ExternalReference new_space_allocation_top =
@@ -1925,8 +1937,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Load top and check if it is the end of elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
- __ mov(r7, Operand(new_space_allocation_top));
- __ ldr(r3, MemOperand(r7));
+ __ mov(r4, Operand(new_space_allocation_top));
+ __ ldr(r3, MemOperand(r4));
__ cmp(end_elements, r3);
__ b(ne, &call_builtin);
@@ -1938,7 +1950,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// We fit and could grow elements.
// Update new_space_allocation_top.
- __ str(r3, MemOperand(r7));
+ __ str(r3, MemOperand(r4));
// Push the argument.
__ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
@@ -1949,6 +1961,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Update elements' and array's sizes.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
__ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
@@ -2539,7 +2552,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
depth, &miss);
- GenerateFastApiDirectCall(masm(), optimization, argc);
+ GenerateFastApiDirectCall(masm(), optimization, argc, false);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
@@ -2991,6 +3004,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- r0 : receiver
@@ -3002,7 +3016,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(r0);
+ __ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/array-iterator.js
index defd7342ab..e734986840 100644
--- a/deps/v8/src/array-iterator.js
+++ b/deps/v8/src/array-iterator.js
@@ -36,9 +36,9 @@ var ARRAY_ITERATOR_KIND_VALUES = 2;
var ARRAY_ITERATOR_KIND_ENTRIES = 3;
// The spec draft also has "sparse" but it is never used.
-var iteratorObjectSymbol = %CreateSymbol(void 0);
-var arrayIteratorNextIndexSymbol = %CreateSymbol(void 0);
-var arrayIterationKindSymbol = %CreateSymbol(void 0);
+var iteratorObjectSymbol = %CreateSymbol(UNDEFINED);
+var arrayIteratorNextIndexSymbol = %CreateSymbol(UNDEFINED);
+var arrayIterationKindSymbol = %CreateSymbol(UNDEFINED);
function ArrayIterator() {}
@@ -74,7 +74,7 @@ function ArrayIteratorNext() {
if (index >= length) {
iterator[arrayIteratorNextIndexSymbol] = 1 / 0; // Infinity
- return CreateIteratorResultObject(void 0, true);
+ return CreateIteratorResultObject(UNDEFINED, true);
}
iterator[arrayIteratorNextIndexSymbol] = index + 1;
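
The hunks above replace the `void 0` literal with UNDEFINED, the macro the natives use for the undefined value, and pin an exhausted iterator's index to Infinity so every later next() call keeps reporting completion. A minimal sketch of the resulting behavior, assuming an engine where array iterators are available (behind a harmony flag in V8 of this vintage, unflagged in modern engines):

    var it = [10, 20].values();
    it.next();  // { value: 10, done: false }
    it.next();  // { value: 20, done: false }
    it.next();  // { value: undefined, done: true }
    it.next();  // still { value: undefined, done: true }: the index stays Infinity
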
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 5f89ebb7a6..e98d7f5b53 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -399,14 +399,13 @@ function ObservedArrayPop(n) {
n--;
var value = this[n];
- EnqueueSpliceRecord(this, n, [value], 0);
-
try {
BeginPerformSplice(this);
delete this[n];
this.length = n;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, n, [value], 0);
}
return value;
@@ -431,7 +430,7 @@ function ArrayPop() {
n--;
var value = this[n];
- delete this[n];
+ Delete(this, ToName(n), true);
this.length = n;
return value;
}
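
Switching `delete this[n]` to the strict `Delete(this, ToName(n), true)` aligns ArrayPop with the ES5 specification, which removes the trailing element with the throw flag set. A sketch of the observable difference in a spec-conforming engine (the array `a` is a made-up example):

    var a = [1, 2, 3];
    Object.defineProperty(a, 2, { value: 3, configurable: false });
    a.pop();  // now throws TypeError, since element 2 cannot be deleted
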
@@ -441,8 +440,6 @@ function ObservedArrayPush() {
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
- EnqueueSpliceRecord(this, n, [], m);
-
try {
BeginPerformSplice(this);
for (var i = 0; i < m; i++) {
@@ -451,6 +448,7 @@ function ObservedArrayPush() {
this.length = n + m;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, n, [], m);
}
return this.length;
@@ -581,14 +579,13 @@ function ArrayReverse() {
function ObservedArrayShift(len) {
var first = this[0];
- EnqueueSpliceRecord(this, 0, [first], 0);
-
try {
BeginPerformSplice(this);
SimpleMove(this, 0, 1, len, 0);
this.length = len - 1;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, 0, [first], 0);
}
return first;
@@ -627,8 +624,6 @@ function ObservedArrayUnshift() {
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
- EnqueueSpliceRecord(this, 0, [], num_arguments);
-
try {
BeginPerformSplice(this);
SimpleMove(this, 0, 0, len, num_arguments);
@@ -638,6 +633,7 @@ function ObservedArrayUnshift() {
this.length = len + num_arguments;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, 0, [], num_arguments);
}
return len + num_arguments;
@@ -681,7 +677,7 @@ function ArraySlice(start, end) {
var start_i = TO_INTEGER(start);
var end_i = len;
- if (end !== void 0) end_i = TO_INTEGER(end);
+ if (!IS_UNDEFINED(end)) end_i = TO_INTEGER(end);
if (start_i < 0) {
start_i += len;
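
The `end !== void 0` test becomes IS_UNDEFINED, the natives' macro for an undefined check; the semantics are unchanged, and an explicitly passed undefined end still means "slice to the end of the array":

    [1, 2, 3].slice(1);             // [2, 3]
    [1, 2, 3].slice(1, undefined);  // [2, 3], same as omitting end
    [1, 2, 3].slice(1, 2);          // [2]
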
@@ -1020,7 +1016,7 @@ function ArraySort(comparefn) {
var proto_length = indices;
for (var i = from; i < proto_length; i++) {
if (proto.hasOwnProperty(i)) {
- obj[i] = void 0;
+ obj[i] = UNDEFINED;
}
}
} else {
@@ -1028,7 +1024,7 @@ function ArraySort(comparefn) {
var index = indices[i];
if (!IS_UNDEFINED(index) && from <= index &&
proto.hasOwnProperty(index)) {
- obj[index] = void 0;
+ obj[index] = UNDEFINED;
}
}
}
@@ -1065,7 +1061,7 @@ function ArraySort(comparefn) {
if (first_undefined < last_defined) {
// Fill in hole or undefined.
obj[first_undefined] = obj[last_defined];
- obj[last_defined] = void 0;
+ obj[last_defined] = UNDEFINED;
}
}
// If there were any undefineds in the entire array, first_undefined
@@ -1077,12 +1073,12 @@ function ArraySort(comparefn) {
// an undefined should be and vice versa.
var i;
for (i = first_undefined; i < length - num_holes; i++) {
- obj[i] = void 0;
+ obj[i] = UNDEFINED;
}
for (i = length - num_holes; i < length; i++) {
    // For compatibility with WebKit, do not expose elements in the prototype.
if (i in %GetPrototype(obj)) {
- obj[i] = void 0;
+ obj[i] = UNDEFINED;
} else {
delete obj[i];
}
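
Across the Observed* helpers above, EnqueueSpliceRecord moves from before the mutation into the finally clause, so the splice change record is enqueued only after the mutation and EndPerformSplice have run. A rough illustration using Array.observe, the experimental API these helpers back (flagged in V8 at the time and since withdrawn from the standard track):

    var arr = [1, 2, 3];
    Array.observe(arr, function (records) {
      var r = records[0];
      // With the reordering, the record describes the completed pop.
      console.log(r.type, r.index, r.removed);  // "splice" 2 [3]
    });
    arr.pop();
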
diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/arraybuffer.js
index 4a4f570146..c5c98dbe4b 100644
--- a/deps/v8/src/arraybuffer.js
+++ b/deps/v8/src/arraybuffer.js
@@ -81,6 +81,10 @@ function ArrayBufferSlice(start, end) {
return result;
}
+function ArrayBufferIsView(obj) {
+ return %ArrayBufferIsView(obj);
+}
+
function SetUpArrayBuffer() {
%CheckIsBootstrapping();
@@ -93,6 +97,10 @@ function SetUpArrayBuffer() {
InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength);
+ InstallFunctions($ArrayBuffer, DONT_ENUM, $Array(
+ "isView", ArrayBufferIsView
+ ));
+
InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
"slice", ArrayBufferSlice
));
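
The new ArrayBuffer.isView, installed as a non-enumerable function on the constructor itself, reports whether a value is a view over a buffer (a typed array or DataView) rather than a buffer or an ordinary object:

    ArrayBuffer.isView(new Uint8Array(8));                 // true
    ArrayBuffer.isView(new DataView(new ArrayBuffer(8)));  // true
    ArrayBuffer.isView(new ArrayBuffer(8));                // false
    ArrayBuffer.isView({ byteLength: 8 });                 // false, not a real view
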
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index fbff62dd65..9ed43601c5 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -98,6 +98,7 @@ struct DoubleConstant BASE_EMBEDDED {
double negative_infinity;
double canonical_non_hole_nan;
double the_hole_nan;
+ double uint32_bias;
};
static DoubleConstant double_constants;
@@ -207,6 +208,24 @@ CpuFeatureScope::~CpuFeatureScope() {
// -----------------------------------------------------------------------------
+// Implementation of PlatformFeatureScope
+
+PlatformFeatureScope::PlatformFeatureScope(CpuFeature f)
+ : old_cross_compile_(CpuFeatures::cross_compile_) {
+  // CpuFeatures is a global singleton, therefore this is only safe in
+  // single-threaded code.
+ ASSERT(Serializer::enabled());
+ uint64_t mask = static_cast<uint64_t>(1) << f;
+ CpuFeatures::cross_compile_ |= mask;
+}
+
+
+PlatformFeatureScope::~PlatformFeatureScope() {
+ CpuFeatures::cross_compile_ = old_cross_compile_;
+}
+
+
+// -----------------------------------------------------------------------------
// Implementation of Label
int Label::pos() const {
@@ -890,6 +909,8 @@ void ExternalReference::SetUp() {
double_constants.canonical_non_hole_nan = OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
+ double_constants.uint32_bias =
+ static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
math_exp_data_mutex = new Mutex();
}
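
uint32_bias is 2^32, computed in double arithmetic as 0xFFFFFFFF + 1. Generated code adds it to rebase a 32-bit pattern that was read as a signed integer back to its unsigned reading; a JavaScript sketch of the same arithmetic (illustrative only, with a made-up helper name):

    var kUint32Bias = 4294967296;  // 0xFFFFFFFF + 1, i.e. 2^32
    function toUint32(signedReading) {
      return signedReading < 0 ? signedReading + kUint32Bias : signedReading;
    }
    toUint32(-1);          // 4294967295 (0xFFFFFFFF)
    toUint32(2147483647);  // 2147483647, already non-negative
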
@@ -1067,6 +1088,13 @@ ExternalReference ExternalReference::get_make_code_young_function(
}
+ExternalReference ExternalReference::get_mark_code_as_executed_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted)));
+}
+
+
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
@@ -1315,6 +1343,20 @@ ExternalReference ExternalReference::address_of_the_hole_nan() {
}
+ExternalReference ExternalReference::record_object_allocation_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate,
+ FUNCTION_ADDR(HeapProfiler::RecordObjectAllocationFromMasm)));
+}
+
+
+ExternalReference ExternalReference::address_of_uint32_bias() {
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.uint32_bias));
+}
+
+
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state(
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 6b399f2082..f0b7fed909 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -134,6 +134,18 @@ class CpuFeatureScope BASE_EMBEDDED {
};
+// Enables an unsupported feature within a scope when cross-compiling for a
+// different CPU.
+class PlatformFeatureScope BASE_EMBEDDED {
+ public:
+ explicit PlatformFeatureScope(CpuFeature f);
+ ~PlatformFeatureScope();
+
+ private:
+ uint64_t old_cross_compile_;
+};
+
+
// -----------------------------------------------------------------------------
// Labels represent pc locations; they are typically jump or call targets.
// After declaration, a label can be freely used to denote known or (yet)
@@ -389,6 +401,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Handle<Cell> target_cell_handle());
INLINE(void set_target_cell(Cell* cell,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
+ INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(Code* stub));
@@ -715,6 +728,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference date_cache_stamp(Isolate* isolate);
static ExternalReference get_make_code_young_function(Isolate* isolate);
+ static ExternalReference get_mark_code_as_executed_function(Isolate* isolate);
+
+  // Support for tracking newly allocated heap objects.
+ static ExternalReference record_object_allocation_function(Isolate* isolate);
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
@@ -798,6 +815,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference address_of_negative_infinity();
static ExternalReference address_of_canonical_non_hole_nan();
static ExternalReference address_of_the_hole_nan();
+ static ExternalReference address_of_uint32_bias();
static ExternalReference math_sin_double_function(Isolate* isolate);
static ExternalReference math_cos_double_function(Isolate* isolate);
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 823dedee09..843f8c8960 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -82,14 +82,13 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) {
}
-VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
- : Expression(isolate),
+VariableProxy::VariableProxy(Isolate* isolate, Variable* var, int position)
+ : Expression(isolate, position),
name_(var->name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
is_trivial_(false),
is_lvalue_(false),
- position_(RelocInfo::kNoPosition),
interface_(var->interface()) {
BindTo(var);
}
@@ -100,13 +99,12 @@ VariableProxy::VariableProxy(Isolate* isolate,
bool is_this,
Interface* interface,
int position)
- : Expression(isolate),
+ : Expression(isolate, position),
name_(name),
var_(NULL),
is_this_(is_this),
is_trivial_(false),
is_lvalue_(false),
- position_(position),
interface_(interface) {
// Names must be canonicalized for fast equality checks.
ASSERT(name->IsInternalizedString());
@@ -133,15 +131,15 @@ Assignment::Assignment(Isolate* isolate,
Expression* target,
Expression* value,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
target_(target),
value_(value),
- pos_(pos),
binary_operation_(NULL),
assignment_id_(GetNextId(isolate)),
is_monomorphic_(false),
is_uninitialized_(false),
+ is_pre_monomorphic_(false),
store_mode_(STANDARD_STORE) { }
@@ -234,33 +232,6 @@ bool ObjectLiteral::Property::emit_store() {
}
-bool IsEqualString(void* first, void* second) {
- ASSERT((*reinterpret_cast<String**>(first))->IsString());
- ASSERT((*reinterpret_cast<String**>(second))->IsString());
- Handle<String> h1(reinterpret_cast<String**>(first));
- Handle<String> h2(reinterpret_cast<String**>(second));
- return (*h1)->Equals(*h2);
-}
-
-
-bool IsEqualNumber(void* first, void* second) {
- ASSERT((*reinterpret_cast<Object**>(first))->IsNumber());
- ASSERT((*reinterpret_cast<Object**>(second))->IsNumber());
-
- Handle<Object> h1(reinterpret_cast<Object**>(first));
- Handle<Object> h2(reinterpret_cast<Object**>(second));
- if (h1->IsSmi()) {
- return h2->IsSmi() && *h1 == *h2;
- }
- if (h2->IsSmi()) return false;
- Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1);
- Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2);
- ASSERT(std::isfinite(n1->value()));
- ASSERT(std::isfinite(n2->value()));
- return n1->value() == n2->value();
-}
-
-
void ObjectLiteral::CalculateEmitStore(Zone* zone) {
ZoneAllocationPolicy allocator(zone);
@@ -456,14 +427,13 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
is_uninitialized_ = oracle->LoadIsUninitialized(this);
if (is_uninitialized_) return;
+ is_pre_monomorphic_ = oracle->LoadIsPreMonomorphic(this);
is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
+ ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
receiver_types_.Clear();
if (key()->IsPropertyName()) {
FunctionPrototypeStub proto_stub(Code::LOAD_IC);
- StringLengthStub string_stub(Code::LOAD_IC, false);
- if (oracle->LoadIsStub(this, &string_stub)) {
- is_string_length_ = true;
- } else if (oracle->LoadIsStub(this, &proto_stub)) {
+ if (oracle->LoadIsStub(this, &proto_stub)) {
is_function_prototype_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
@@ -474,8 +444,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
} else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
is_string_access_ = true;
} else if (is_monomorphic_) {
- receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
- zone);
+ receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this), zone);
} else if (oracle->LoadIsPolymorphic(this)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
@@ -490,7 +459,10 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
TypeFeedbackId id = AssignmentFeedbackId();
is_uninitialized_ = oracle->StoreIsUninitialized(id);
if (is_uninitialized_) return;
+
+ is_pre_monomorphic_ = oracle->StoreIsPreMonomorphic(id);
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
+ ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
receiver_types_.Clear();
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
@@ -655,7 +627,7 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
receiver_types_.Add(handle(holder_->map()), oracle->zone());
}
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
for (int i = 0; i < length; i++) {
@@ -1067,9 +1039,9 @@ CaseClause::CaseClause(Isolate* isolate,
Expression* label,
ZoneList<Statement*>* statements,
int pos)
- : label_(label),
+ : AstNode(pos),
+ label_(label),
statements_(statements),
- position_(pos),
compare_type_(Type::None(), isolate),
compare_id_(AstNode::GetNextId(isolate)),
entry_id_(AstNode::GetNextId(isolate)) {
@@ -1111,6 +1083,7 @@ REGULAR_NODE(ContinueStatement)
REGULAR_NODE(BreakStatement)
REGULAR_NODE(ReturnStatement)
REGULAR_NODE(SwitchStatement)
+REGULAR_NODE(CaseClause)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
REGULAR_NODE(ArrayLiteral)
@@ -1146,7 +1119,7 @@ DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
-DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
+DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index c63090687b..b4f7348eee 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -97,7 +97,7 @@ namespace internal {
#define EXPRESSION_NODE_LIST(V) \
V(FunctionLiteral) \
- V(SharedFunctionInfoLiteral) \
+ V(NativeFunctionLiteral) \
V(Conditional) \
V(VariableProxy) \
V(Literal) \
@@ -117,11 +117,15 @@ namespace internal {
V(CompareOperation) \
V(ThisFunction)
+#define AUXILIARY_NODE_LIST(V) \
+ V(CaseClause)
+
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
MODULE_NODE_LIST(V) \
STATEMENT_NODE_LIST(V) \
- EXPRESSION_NODE_LIST(V)
+ EXPRESSION_NODE_LIST(V) \
+ AUXILIARY_NODE_LIST(V)
// Forward declarations
class AstConstructionVisitor;
@@ -206,12 +210,12 @@ class AstNode: public ZoneObject {
return zone->New(static_cast<int>(size));
}
- AstNode() {}
-
+ explicit AstNode(int position): position_(position) {}
virtual ~AstNode() {}
virtual void Accept(AstVisitor* v) = 0;
virtual NodeType node_type() const = 0;
+ int position() const { return position_; }
// Type testing & conversion functions overridden by concrete subclasses.
#define DECLARE_NODE_FUNCTIONS(type) \
@@ -248,21 +252,17 @@ class AstNode: public ZoneObject {
void* operator new(size_t size);
friend class CaseClause; // Generates AST IDs.
+
+ int position_;
};
class Statement : public AstNode {
public:
- Statement() : statement_pos_(RelocInfo::kNoPosition) {}
+ explicit Statement(int position) : AstNode(position) {}
bool IsEmpty() { return AsEmptyStatement() != NULL; }
virtual bool IsJump() const { return false; }
-
- void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
- int statement_pos() const { return statement_pos_; }
-
- private:
- int statement_pos_;
};
@@ -329,11 +329,6 @@ class Expression : public AstNode {
kTest
};
- virtual int position() const {
- UNREACHABLE();
- return 0;
- }
-
virtual bool IsValidLeftHandSide() { return false; }
// Helpers for ToBoolean conversion.
@@ -387,8 +382,9 @@ class Expression : public AstNode {
TypeFeedbackId test_id() const { return test_id_; }
protected:
- explicit Expression(Isolate* isolate)
- : bounds_(Bounds::Unbounded(isolate)),
+ Expression(Isolate* isolate, int pos)
+ : AstNode(pos),
+ bounds_(Bounds::Unbounded(isolate)),
id_(GetNextId(isolate)),
test_id_(GetNextId(isolate)) {}
void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
@@ -431,8 +427,10 @@ class BreakableStatement : public Statement {
protected:
BreakableStatement(
- Isolate* isolate, ZoneStringList* labels, BreakableType breakable_type)
- : labels_(labels),
+ Isolate* isolate, ZoneStringList* labels,
+ BreakableType breakable_type, int position)
+ : Statement(position),
+ labels_(labels),
breakable_type_(breakable_type),
entry_id_(GetNextId(isolate)),
exit_id_(GetNextId(isolate)) {
@@ -473,8 +471,9 @@ class Block V8_FINAL : public BreakableStatement {
ZoneStringList* labels,
int capacity,
bool is_initializer_block,
+ int pos,
Zone* zone)
- : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
+ : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY, pos),
statements_(capacity, zone),
is_initializer_block_(is_initializer_block),
scope_(NULL) {
@@ -498,8 +497,10 @@ class Declaration : public AstNode {
protected:
Declaration(VariableProxy* proxy,
VariableMode mode,
- Scope* scope)
- : proxy_(proxy),
+ Scope* scope,
+ int pos)
+ : AstNode(pos),
+ proxy_(proxy),
mode_(mode),
scope_(scope) {
ASSERT(IsDeclaredVariableMode(mode));
@@ -525,8 +526,9 @@ class VariableDeclaration V8_FINAL : public Declaration {
protected:
VariableDeclaration(VariableProxy* proxy,
VariableMode mode,
- Scope* scope)
- : Declaration(proxy, mode, scope) {
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, mode, scope, pos) {
}
};
@@ -545,8 +547,9 @@ class FunctionDeclaration V8_FINAL : public Declaration {
FunctionDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
- Scope* scope)
- : Declaration(proxy, mode, scope),
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, mode, scope, pos),
fun_(fun) {
// At the moment there are no "const functions" in JavaScript...
ASSERT(mode == VAR || mode == LET);
@@ -570,8 +573,9 @@ class ModuleDeclaration V8_FINAL : public Declaration {
protected:
ModuleDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope)
- : Declaration(proxy, MODULE, scope),
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, MODULE, scope, pos),
module_(module) {
}
@@ -592,8 +596,9 @@ class ImportDeclaration V8_FINAL : public Declaration {
protected:
ImportDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope)
- : Declaration(proxy, LET, scope),
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, LET, scope, pos),
module_(module) {
}
@@ -611,8 +616,8 @@ class ExportDeclaration V8_FINAL : public Declaration {
}
protected:
- ExportDeclaration(VariableProxy* proxy, Scope* scope)
- : Declaration(proxy, LET, scope) {}
+ ExportDeclaration(VariableProxy* proxy, Scope* scope, int pos)
+ : Declaration(proxy, LET, scope, pos) {}
};
@@ -622,11 +627,13 @@ class Module : public AstNode {
Block* body() const { return body_; }
protected:
- explicit Module(Zone* zone)
- : interface_(Interface::NewModule(zone)),
+ Module(Zone* zone, int pos)
+ : AstNode(pos),
+ interface_(Interface::NewModule(zone)),
body_(NULL) {}
- explicit Module(Interface* interface, Block* body = NULL)
- : interface_(interface),
+ Module(Interface* interface, int pos, Block* body = NULL)
+ : AstNode(pos),
+ interface_(interface),
body_(body) {}
private:
@@ -640,7 +647,8 @@ class ModuleLiteral V8_FINAL : public Module {
DECLARE_NODE_TYPE(ModuleLiteral)
protected:
- ModuleLiteral(Block* body, Interface* interface) : Module(interface, body) {}
+ ModuleLiteral(Block* body, Interface* interface, int pos)
+ : Module(interface, pos, body) {}
};
@@ -651,7 +659,7 @@ class ModuleVariable V8_FINAL : public Module {
VariableProxy* proxy() const { return proxy_; }
protected:
- inline explicit ModuleVariable(VariableProxy* proxy);
+ inline ModuleVariable(VariableProxy* proxy, int pos);
private:
VariableProxy* proxy_;
@@ -666,8 +674,8 @@ class ModulePath V8_FINAL : public Module {
Handle<String> name() const { return name_; }
protected:
- ModulePath(Module* module, Handle<String> name, Zone* zone)
- : Module(zone),
+ ModulePath(Module* module, Handle<String> name, Zone* zone, int pos)
+ : Module(zone, pos),
module_(module),
name_(name) {
}
@@ -685,8 +693,8 @@ class ModuleUrl V8_FINAL : public Module {
Handle<String> url() const { return url_; }
protected:
- ModuleUrl(Handle<String> url, Zone* zone)
- : Module(zone), url_(url) {
+ ModuleUrl(Handle<String> url, Zone* zone, int pos)
+ : Module(zone, pos), url_(url) {
}
private:
@@ -702,8 +710,9 @@ class ModuleStatement V8_FINAL : public Statement {
Block* body() const { return body_; }
protected:
- ModuleStatement(VariableProxy* proxy, Block* body)
- : proxy_(proxy),
+ ModuleStatement(VariableProxy* proxy, Block* body, int pos)
+ : Statement(pos),
+ proxy_(proxy),
body_(body) {
}
@@ -730,8 +739,8 @@ class IterationStatement : public BreakableStatement {
Label* continue_target() { return &continue_target_; }
protected:
- IterationStatement(Isolate* isolate, ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+ IterationStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos),
body_(NULL),
osr_entry_id_(GetNextId(isolate)) {
}
@@ -759,20 +768,14 @@ class DoWhileStatement V8_FINAL : public IterationStatement {
Expression* cond() const { return cond_; }
- // Position where condition expression starts. We need it to make
- // the loop's condition a breakable location.
- int condition_position() { return condition_position_; }
- void set_condition_position(int pos) { condition_position_ = pos; }
-
virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
virtual BailoutId StackCheckId() const V8_OVERRIDE { return back_edge_id_; }
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
+ DoWhileStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
cond_(NULL),
- condition_position_(-1),
continue_id_(GetNextId(isolate)),
back_edge_id_(GetNextId(isolate)) {
}
@@ -780,8 +783,6 @@ class DoWhileStatement V8_FINAL : public IterationStatement {
private:
Expression* cond_;
- int condition_position_;
-
const BailoutId continue_id_;
const BailoutId back_edge_id_;
};
@@ -809,8 +810,8 @@ class WhileStatement V8_FINAL : public IterationStatement {
BailoutId BodyId() const { return body_id_; }
protected:
- WhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
+ WhileStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
cond_(NULL),
may_have_function_literal_(true),
body_id_(GetNextId(isolate)) {
@@ -860,8 +861,8 @@ class ForStatement V8_FINAL : public IterationStatement {
void set_loop_variable(Variable* var) { loop_variable_ = var; }
protected:
- ForStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
+ ForStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
init_(NULL),
cond_(NULL),
next_(NULL),
@@ -902,8 +903,8 @@ class ForEachStatement : public IterationStatement {
Expression* subject() const { return subject_; }
protected:
- ForEachStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
+ ForEachStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
each_(NULL),
subject_(NULL) {
}
@@ -933,8 +934,8 @@ class ForInStatement V8_FINAL : public ForEachStatement {
virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
protected:
- ForInStatement(Isolate* isolate, ZoneStringList* labels)
- : ForEachStatement(isolate, labels),
+ ForInStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : ForEachStatement(isolate, labels, pos),
for_in_type_(SLOW_FOR_IN),
body_id_(GetNextId(isolate)),
prepare_id_(GetNextId(isolate)) {
@@ -994,8 +995,8 @@ class ForOfStatement V8_FINAL : public ForEachStatement {
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- ForOfStatement(Isolate* isolate, ZoneStringList* labels)
- : ForEachStatement(isolate, labels),
+ ForOfStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : ForEachStatement(isolate, labels, pos),
assign_iterator_(NULL),
next_result_(NULL),
result_done_(NULL),
@@ -1020,8 +1021,8 @@ class ExpressionStatement V8_FINAL : public Statement {
virtual bool IsJump() const V8_OVERRIDE { return expression_->IsThrow(); }
protected:
- explicit ExpressionStatement(Expression* expression)
- : expression_(expression) { }
+ ExpressionStatement(Expression* expression, int pos)
+ : Statement(pos), expression_(expression) { }
private:
Expression* expression_;
@@ -1033,7 +1034,7 @@ class JumpStatement : public Statement {
virtual bool IsJump() const V8_FINAL V8_OVERRIDE { return true; }
protected:
- JumpStatement() {}
+ explicit JumpStatement(int pos) : Statement(pos) {}
};
@@ -1044,8 +1045,8 @@ class ContinueStatement V8_FINAL : public JumpStatement {
IterationStatement* target() const { return target_; }
protected:
- explicit ContinueStatement(IterationStatement* target)
- : target_(target) { }
+ explicit ContinueStatement(IterationStatement* target, int pos)
+ : JumpStatement(pos), target_(target) { }
private:
IterationStatement* target_;
@@ -1059,8 +1060,8 @@ class BreakStatement V8_FINAL : public JumpStatement {
BreakableStatement* target() const { return target_; }
protected:
- explicit BreakStatement(BreakableStatement* target)
- : target_(target) { }
+ explicit BreakStatement(BreakableStatement* target, int pos)
+ : JumpStatement(pos), target_(target) { }
private:
BreakableStatement* target_;
@@ -1074,8 +1075,8 @@ class ReturnStatement V8_FINAL : public JumpStatement {
Expression* expression() const { return expression_; }
protected:
- explicit ReturnStatement(Expression* expression)
- : expression_(expression) { }
+ explicit ReturnStatement(Expression* expression, int pos)
+ : JumpStatement(pos), expression_(expression) { }
private:
Expression* expression_;
@@ -1091,8 +1092,10 @@ class WithStatement V8_FINAL : public Statement {
Statement* statement() const { return statement_; }
protected:
- WithStatement(Scope* scope, Expression* expression, Statement* statement)
- : scope_(scope),
+ WithStatement(
+ Scope* scope, Expression* expression, Statement* statement, int pos)
+ : Statement(pos),
+ scope_(scope),
expression_(expression),
statement_(statement) { }
@@ -1103,12 +1106,9 @@ class WithStatement V8_FINAL : public Statement {
};
-class CaseClause V8_FINAL : public ZoneObject {
+class CaseClause V8_FINAL : public AstNode {
public:
- CaseClause(Isolate* isolate,
- Expression* label,
- ZoneList<Statement*>* statements,
- int pos);
+ DECLARE_NODE_TYPE(CaseClause)
bool is_default() const { return label_ == NULL; }
Expression* label() const {
@@ -1118,9 +1118,6 @@ class CaseClause V8_FINAL : public ZoneObject {
Label* body_target() { return &body_target_; }
ZoneList<Statement*>* statements() const { return statements_; }
- int position() const { return position_; }
- void set_position(int pos) { position_ = pos; }
-
BailoutId EntryId() const { return entry_id_; }
// Type feedback information.
@@ -1129,10 +1126,14 @@ class CaseClause V8_FINAL : public ZoneObject {
Handle<Type> compare_type() { return compare_type_; }
private:
+ CaseClause(Isolate* isolate,
+ Expression* label,
+ ZoneList<Statement*>* statements,
+ int pos);
+
Expression* label_;
Label body_target_;
ZoneList<Statement*>* statements_;
- int position_;
Handle<Type> compare_type_;
const TypeFeedbackId compare_id_;
@@ -1158,8 +1159,8 @@ class SwitchStatement V8_FINAL : public BreakableStatement {
void set_switch_type(SwitchType switch_type) { switch_type_ = switch_type; }
protected:
- SwitchStatement(Isolate* isolate, ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+ SwitchStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos),
tag_(NULL),
cases_(NULL) { }
@@ -1199,8 +1200,10 @@ class IfStatement V8_FINAL : public Statement {
IfStatement(Isolate* isolate,
Expression* condition,
Statement* then_statement,
- Statement* else_statement)
- : condition_(condition),
+ Statement* else_statement,
+ int pos)
+ : Statement(pos),
+ condition_(condition),
then_statement_(then_statement),
else_statement_(else_statement),
if_id_(GetNextId(isolate)),
@@ -1222,7 +1225,8 @@ class IfStatement V8_FINAL : public Statement {
// stack in the compiler; this should probably be reworked.
class TargetCollector V8_FINAL : public AstNode {
public:
- explicit TargetCollector(Zone* zone) : targets_(0, zone) { }
+ explicit TargetCollector(Zone* zone)
+ : AstNode(RelocInfo::kNoPosition), targets_(0, zone) { }
// Adds a jump target to the collector. The collector stores a pointer not
// a copy of the target to make binding work, so make sure not to pass in
@@ -1252,8 +1256,9 @@ class TryStatement : public Statement {
ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
protected:
- TryStatement(int index, Block* try_block)
- : index_(index),
+ TryStatement(int index, Block* try_block, int pos)
+ : Statement(pos),
+ index_(index),
try_block_(try_block),
escaping_targets_(NULL) { }
@@ -1279,8 +1284,9 @@ class TryCatchStatement V8_FINAL : public TryStatement {
Block* try_block,
Scope* scope,
Variable* variable,
- Block* catch_block)
- : TryStatement(index, try_block),
+ Block* catch_block,
+ int pos)
+ : TryStatement(index, try_block, pos),
scope_(scope),
variable_(variable),
catch_block_(catch_block) {
@@ -1300,8 +1306,9 @@ class TryFinallyStatement V8_FINAL : public TryStatement {
Block* finally_block() const { return finally_block_; }
protected:
- TryFinallyStatement(int index, Block* try_block, Block* finally_block)
- : TryStatement(index, try_block),
+ TryFinallyStatement(
+ int index, Block* try_block, Block* finally_block, int pos)
+ : TryStatement(index, try_block, pos),
finally_block_(finally_block) { }
private:
@@ -1314,7 +1321,7 @@ class DebuggerStatement V8_FINAL : public Statement {
DECLARE_NODE_TYPE(DebuggerStatement)
protected:
- DebuggerStatement() {}
+ explicit DebuggerStatement(int pos): Statement(pos) {}
};
@@ -1323,7 +1330,7 @@ class EmptyStatement V8_FINAL : public Statement {
DECLARE_NODE_TYPE(EmptyStatement)
protected:
- EmptyStatement() {}
+ explicit EmptyStatement(int pos): Statement(pos) {}
};
@@ -1380,8 +1387,9 @@ class Literal V8_FINAL : public Expression {
TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
protected:
- Literal(Isolate* isolate, Handle<Object> value)
- : Expression(isolate),
+ Literal(
+ Isolate* isolate, Handle<Object> value, int position)
+ : Expression(isolate, position),
value_(value),
isolate_(isolate) { }
@@ -1411,8 +1419,9 @@ class MaterializedLiteral : public Expression {
MaterializedLiteral(Isolate* isolate,
int literal_index,
bool is_simple,
- int depth)
- : Expression(isolate),
+ int depth,
+ int pos)
+ : Expression(isolate, pos),
literal_index_(literal_index),
is_simple_(is_simple),
depth_(depth) {}
@@ -1510,8 +1519,9 @@ class ObjectLiteral V8_FINAL : public MaterializedLiteral {
bool fast_elements,
int depth,
bool may_store_doubles,
- bool has_function)
- : MaterializedLiteral(isolate, literal_index, is_simple, depth),
+ bool has_function,
+ int pos)
+ : MaterializedLiteral(isolate, literal_index, is_simple, depth, pos),
constant_properties_(constant_properties),
properties_(properties),
fast_elements_(fast_elements),
@@ -1539,8 +1549,9 @@ class RegExpLiteral V8_FINAL : public MaterializedLiteral {
RegExpLiteral(Isolate* isolate,
Handle<String> pattern,
Handle<String> flags,
- int literal_index)
- : MaterializedLiteral(isolate, literal_index, false, 1),
+ int literal_index,
+ int pos)
+ : MaterializedLiteral(isolate, literal_index, false, 1, pos),
pattern_(pattern),
flags_(flags) {}
@@ -1549,6 +1560,7 @@ class RegExpLiteral V8_FINAL : public MaterializedLiteral {
Handle<String> flags_;
};
+
// An array literal has a literals object that is used
// for minimizing the work when constructing it at runtime.
class ArrayLiteral V8_FINAL : public MaterializedLiteral {
@@ -1569,8 +1581,9 @@ class ArrayLiteral V8_FINAL : public MaterializedLiteral {
ZoneList<Expression*>* values,
int literal_index,
bool is_simple,
- int depth)
- : MaterializedLiteral(isolate, literal_index, is_simple, depth),
+ int depth,
+ int pos)
+ : MaterializedLiteral(isolate, literal_index, is_simple, depth, pos),
constant_elements_(constant_elements),
values_(values),
first_element_id_(ReserveIdRange(isolate, values->length())) {}
@@ -1603,7 +1616,6 @@ class VariableProxy V8_FINAL : public Expression {
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
- int position() const { return position_; }
Interface* interface() const { return interface_; }
@@ -1614,7 +1626,7 @@ class VariableProxy V8_FINAL : public Expression {
void BindTo(Variable* var);
protected:
- VariableProxy(Isolate* isolate, Variable* var);
+ VariableProxy(Isolate* isolate, Variable* var, int position);
VariableProxy(Isolate* isolate,
Handle<String> name,
@@ -1629,7 +1641,6 @@ class VariableProxy V8_FINAL : public Expression {
// True if this variable proxy is being used in an assignment
// or with a increment/decrement operator.
bool is_lvalue_;
- int position_;
Interface* interface_;
};
@@ -1642,11 +1653,9 @@ class Property V8_FINAL : public Expression {
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId LoadId() const { return load_id_; }
- bool IsStringLength() const { return is_string_length_; }
bool IsStringAccess() const { return is_string_access_; }
bool IsFunctionPrototype() const { return is_function_prototype_; }
@@ -1660,6 +1669,10 @@ class Property V8_FINAL : public Expression {
return STANDARD_STORE;
}
bool IsUninitialized() { return is_uninitialized_; }
+ bool IsPreMonomorphic() { return is_pre_monomorphic_; }
+ bool HasNoTypeInformation() {
+ return is_uninitialized_ || is_pre_monomorphic_;
+ }
TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
protected:
@@ -1667,27 +1680,25 @@ class Property V8_FINAL : public Expression {
Expression* obj,
Expression* key,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
obj_(obj),
key_(key),
- pos_(pos),
load_id_(GetNextId(isolate)),
is_monomorphic_(false),
+ is_pre_monomorphic_(false),
is_uninitialized_(false),
- is_string_length_(false),
is_string_access_(false),
is_function_prototype_(false) { }
private:
Expression* obj_;
Expression* key_;
- int pos_;
const BailoutId load_id_;
SmallMapList receiver_types_;
bool is_monomorphic_ : 1;
+ bool is_pre_monomorphic_ : 1;
bool is_uninitialized_ : 1;
- bool is_string_length_ : 1;
bool is_string_access_ : 1;
bool is_function_prototype_ : 1;
};
@@ -1699,7 +1710,6 @@ class Call V8_FINAL : public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const V8_FINAL { return pos_; }
// Type feedback information.
TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
@@ -1754,10 +1764,9 @@ class Call V8_FINAL : public Expression {
Expression* expression,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
expression_(expression),
arguments_(arguments),
- pos_(pos),
is_monomorphic_(false),
check_type_(RECEIVER_MAP_CHECK),
return_id_(GetNextId(isolate)) { }
@@ -1765,7 +1774,6 @@ class Call V8_FINAL : public Expression {
private:
Expression* expression_;
ZoneList<Expression*>* arguments_;
- int pos_;
bool is_monomorphic_;
CheckType check_type_;
@@ -1784,7 +1792,6 @@ class CallNew V8_FINAL : public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
// Type feedback information.
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
@@ -1803,10 +1810,9 @@ class CallNew V8_FINAL : public Expression {
Expression* expression,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
expression_(expression),
arguments_(arguments),
- pos_(pos),
is_monomorphic_(false),
elements_kind_(GetInitialFastElementsKind()),
return_id_(GetNextId(isolate)) { }
@@ -1814,7 +1820,6 @@ class CallNew V8_FINAL : public Expression {
private:
Expression* expression_;
ZoneList<Expression*>* arguments_;
- int pos_;
bool is_monomorphic_;
Handle<JSFunction> target_;
@@ -1844,8 +1849,9 @@ class CallRuntime V8_FINAL : public Expression {
CallRuntime(Isolate* isolate,
Handle<String> name,
const Runtime::Function* function,
- ZoneList<Expression*>* arguments)
- : Expression(isolate),
+ ZoneList<Expression*>* arguments,
+ int pos)
+ : Expression(isolate, pos),
name_(name),
function_(function),
arguments_(arguments) { }
@@ -1863,7 +1869,6 @@ class UnaryOperation V8_FINAL : public Expression {
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId MaterializeTrueId() { return materialize_true_id_; }
BailoutId MaterializeFalseId() { return materialize_false_id_; }
@@ -1876,10 +1881,9 @@ class UnaryOperation V8_FINAL : public Expression {
Token::Value op,
Expression* expression,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
expression_(expression),
- pos_(pos),
materialize_true_id_(GetNextId(isolate)),
materialize_false_id_(GetNextId(isolate)) {
ASSERT(Token::IsUnaryOp(op));
@@ -1888,7 +1892,6 @@ class UnaryOperation V8_FINAL : public Expression {
private:
Token::Value op_;
Expression* expression_;
- int pos_;
// For unary not (Token::NOT), the AST ids where true and false will
// actually be materialized, respectively.
@@ -1906,7 +1909,6 @@ class BinaryOperation V8_FINAL : public Expression {
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId RightId() const { return right_id_; }
@@ -1923,11 +1925,10 @@ class BinaryOperation V8_FINAL : public Expression {
Expression* left,
Expression* right,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
left_(left),
right_(right),
- pos_(pos),
right_id_(GetNextId(isolate)) {
ASSERT(Token::IsBinaryOp(op));
}
@@ -1936,7 +1937,6 @@ class BinaryOperation V8_FINAL : public Expression {
Token::Value op_;
Expression* left_;
Expression* right_;
- int pos_;
// TODO(rossberg): the fixed arg should probably be represented as a Constant
// type for the RHS.
@@ -1961,7 +1961,6 @@ class CountOperation V8_FINAL : public Expression {
}
Expression* expression() const { return expression_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
@@ -1971,7 +1970,7 @@ class CountOperation V8_FINAL : public Expression {
virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
- TypeInfo type() const { return type_; }
+ Handle<Type> type() const { return type_; }
BailoutId AssignmentId() const { return assignment_id_; }
@@ -1984,13 +1983,12 @@ class CountOperation V8_FINAL : public Expression {
bool is_prefix,
Expression* expr,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
is_prefix_(is_prefix),
is_monomorphic_(false),
store_mode_(STANDARD_STORE),
expression_(expr),
- pos_(pos),
assignment_id_(GetNextId(isolate)),
count_id_(GetNextId(isolate)) {}
@@ -2000,10 +1998,9 @@ class CountOperation V8_FINAL : public Expression {
bool is_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
- TypeInfo type_;
+ Handle<Type> type_;
Expression* expression_;
- int pos_;
const BailoutId assignment_id_;
const TypeFeedbackId count_id_;
SmallMapList receiver_types_;
@@ -2017,7 +2014,6 @@ class CompareOperation V8_FINAL : public Expression {
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
@@ -2035,12 +2031,11 @@ class CompareOperation V8_FINAL : public Expression {
Expression* left,
Expression* right,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
left_(left),
right_(right),
- pos_(pos),
- combined_type_(Type::Null(), isolate) {
+ combined_type_(Type::None(), isolate) {
ASSERT(Token::IsCompareOp(op));
}
@@ -2048,7 +2043,6 @@ class CompareOperation V8_FINAL : public Expression {
Token::Value op_;
Expression* left_;
Expression* right_;
- int pos_;
Handle<Type> combined_type_;
};
@@ -2062,9 +2056,6 @@ class Conditional V8_FINAL : public Expression {
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
- int then_expression_position() const { return then_expression_position_; }
- int else_expression_position() const { return else_expression_position_; }
-
BailoutId ThenId() const { return then_id_; }
BailoutId ElseId() const { return else_id_; }
@@ -2073,14 +2064,11 @@ class Conditional V8_FINAL : public Expression {
Expression* condition,
Expression* then_expression,
Expression* else_expression,
- int then_expression_position,
- int else_expression_position)
- : Expression(isolate),
+ int position)
+ : Expression(isolate, position),
condition_(condition),
then_expression_(then_expression),
else_expression_(else_expression),
- then_expression_position_(then_expression_position),
- else_expression_position_(else_expression_position),
then_id_(GetNextId(isolate)),
else_id_(GetNextId(isolate)) { }
@@ -2088,8 +2076,6 @@ class Conditional V8_FINAL : public Expression {
Expression* condition_;
Expression* then_expression_;
Expression* else_expression_;
- int then_expression_position_;
- int else_expression_position_;
const BailoutId then_id_;
const BailoutId else_id_;
};
@@ -2106,7 +2092,6 @@ class Assignment V8_FINAL : public Expression {
Token::Value op() const { return op_; }
Expression* target() const { return target_; }
Expression* value() const { return value_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
BinaryOperation* binary_operation() const { return binary_operation_; }
// This check relies on the definition order of token in token.h.
@@ -2119,6 +2104,10 @@ class Assignment V8_FINAL : public Expression {
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
bool IsUninitialized() { return is_uninitialized_; }
+ bool IsPreMonomorphic() { return is_pre_monomorphic_; }
+ bool HasNoTypeInformation() {
+ return is_uninitialized_ || is_pre_monomorphic_;
+ }
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
return &receiver_types_;
}
@@ -2137,8 +2126,8 @@ class Assignment V8_FINAL : public Expression {
void Init(Isolate* isolate, AstNodeFactory<Visitor>* factory) {
ASSERT(Token::IsAssignmentOp(op_));
if (is_compound()) {
- binary_operation_ =
- factory->NewBinaryOperation(binary_op(), target_, value_, pos_ + 1);
+ binary_operation_ = factory->NewBinaryOperation(
+ binary_op(), target_, value_, position() + 1);
}
}
@@ -2146,12 +2135,12 @@ class Assignment V8_FINAL : public Expression {
Token::Value op_;
Expression* target_;
Expression* value_;
- int pos_;
BinaryOperation* binary_operation_;
const BailoutId assignment_id_;
bool is_monomorphic_ : 1;
bool is_uninitialized_ : 1;
+ bool is_pre_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
SmallMapList receiver_types_;
@@ -2172,7 +2161,6 @@ class Yield V8_FINAL : public Expression {
Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
Kind yield_kind() const { return yield_kind_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
// Delegating yield surrounds the "yield" in a "try/catch". This index
// locates the catch handler in the handler table, and is equivalent to
@@ -2192,19 +2180,17 @@ class Yield V8_FINAL : public Expression {
Expression* expression,
Kind yield_kind,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
generator_object_(generator_object),
expression_(expression),
yield_kind_(yield_kind),
- index_(-1),
- pos_(pos) { }
+ index_(-1) { }
private:
Expression* generator_object_;
Expression* expression_;
Kind yield_kind_;
int index_;
- int pos_;
};
@@ -2213,15 +2199,13 @@ class Throw V8_FINAL : public Expression {
DECLARE_NODE_TYPE(Throw)
Expression* exception() const { return exception_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
protected:
Throw(Isolate* isolate, Expression* exception, int pos)
- : Expression(isolate), exception_(exception), pos_(pos) {}
+ : Expression(isolate, pos), exception_(exception) {}
private:
Expression* exception_;
- int pos_;
};
@@ -2336,8 +2320,9 @@ class FunctionLiteral V8_FINAL : public Expression {
ParameterFlag has_duplicate_parameters,
IsFunctionFlag is_function,
IsParenthesizedFlag is_parenthesized,
- IsGeneratorFlag is_generator)
- : Expression(isolate),
+ IsGeneratorFlag is_generator,
+ int position)
+ : Expression(isolate, position),
name_(name),
scope_(scope),
body_(body),
@@ -2383,23 +2368,21 @@ class FunctionLiteral V8_FINAL : public Expression {
};
-class SharedFunctionInfoLiteral V8_FINAL : public Expression {
+class NativeFunctionLiteral V8_FINAL : public Expression {
public:
- DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
+ DECLARE_NODE_TYPE(NativeFunctionLiteral)
- Handle<SharedFunctionInfo> shared_function_info() const {
- return shared_function_info_;
- }
+ Handle<String> name() const { return name_; }
+ v8::Extension* extension() const { return extension_; }
protected:
- SharedFunctionInfoLiteral(
- Isolate* isolate,
- Handle<SharedFunctionInfo> shared_function_info)
- : Expression(isolate),
- shared_function_info_(shared_function_info) { }
+ NativeFunctionLiteral(
+ Isolate* isolate, Handle<String> name, v8::Extension* extension, int pos)
+ : Expression(isolate, pos), name_(name), extension_(extension) {}
private:
- Handle<SharedFunctionInfo> shared_function_info_;
+ Handle<String> name_;
+ v8::Extension* extension_;
};
@@ -2408,7 +2391,7 @@ class ThisFunction V8_FINAL : public Expression {
DECLARE_NODE_TYPE(ThisFunction)
protected:
- explicit ThisFunction(Isolate* isolate): Expression(isolate) {}
+ explicit ThisFunction(Isolate* isolate, int pos): Expression(isolate, pos) {}
};
#undef DECLARE_NODE_TYPE
@@ -2775,8 +2758,8 @@ class RegExpEmpty V8_FINAL : public RegExpTree {
// ----------------------------------------------------------------------------
// Out-of-line inline constructors (to side-step cyclic dependencies).
-inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
- : Module(proxy->interface()),
+inline ModuleVariable::ModuleVariable(VariableProxy* proxy, int pos)
+ : Module(proxy->interface(), pos),
proxy_(proxy) {
}
@@ -2893,75 +2876,81 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
VariableMode mode,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
VariableDeclaration* decl =
- new(zone_) VariableDeclaration(proxy, mode, scope);
+ new(zone_) VariableDeclaration(proxy, mode, scope, pos);
VISIT_AND_RETURN(VariableDeclaration, decl)
}
FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
FunctionDeclaration* decl =
- new(zone_) FunctionDeclaration(proxy, mode, fun, scope);
+ new(zone_) FunctionDeclaration(proxy, mode, fun, scope, pos);
VISIT_AND_RETURN(FunctionDeclaration, decl)
}
ModuleDeclaration* NewModuleDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
ModuleDeclaration* decl =
- new(zone_) ModuleDeclaration(proxy, module, scope);
+ new(zone_) ModuleDeclaration(proxy, module, scope, pos);
VISIT_AND_RETURN(ModuleDeclaration, decl)
}
ImportDeclaration* NewImportDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
ImportDeclaration* decl =
- new(zone_) ImportDeclaration(proxy, module, scope);
+ new(zone_) ImportDeclaration(proxy, module, scope, pos);
VISIT_AND_RETURN(ImportDeclaration, decl)
}
ExportDeclaration* NewExportDeclaration(VariableProxy* proxy,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
ExportDeclaration* decl =
- new(zone_) ExportDeclaration(proxy, scope);
+ new(zone_) ExportDeclaration(proxy, scope, pos);
VISIT_AND_RETURN(ExportDeclaration, decl)
}
- ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface) {
- ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface);
+ ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface, int pos) {
+ ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface, pos);
VISIT_AND_RETURN(ModuleLiteral, module)
}
- ModuleVariable* NewModuleVariable(VariableProxy* proxy) {
- ModuleVariable* module = new(zone_) ModuleVariable(proxy);
+ ModuleVariable* NewModuleVariable(VariableProxy* proxy, int pos) {
+ ModuleVariable* module = new(zone_) ModuleVariable(proxy, pos);
VISIT_AND_RETURN(ModuleVariable, module)
}
- ModulePath* NewModulePath(Module* origin, Handle<String> name) {
- ModulePath* module = new(zone_) ModulePath(origin, name, zone_);
+ ModulePath* NewModulePath(Module* origin, Handle<String> name, int pos) {
+ ModulePath* module = new(zone_) ModulePath(origin, name, zone_, pos);
VISIT_AND_RETURN(ModulePath, module)
}
- ModuleUrl* NewModuleUrl(Handle<String> url) {
- ModuleUrl* module = new(zone_) ModuleUrl(url, zone_);
+ ModuleUrl* NewModuleUrl(Handle<String> url, int pos) {
+ ModuleUrl* module = new(zone_) ModuleUrl(url, zone_, pos);
VISIT_AND_RETURN(ModuleUrl, module)
}
Block* NewBlock(ZoneStringList* labels,
int capacity,
- bool is_initializer_block) {
+ bool is_initializer_block,
+ int pos) {
Block* block = new(zone_) Block(
- isolate_, labels, capacity, is_initializer_block, zone_);
+ isolate_, labels, capacity, is_initializer_block, pos, zone_);
VISIT_AND_RETURN(Block, block)
}
#define STATEMENT_WITH_LABELS(NodeType) \
- NodeType* New##NodeType(ZoneStringList* labels) { \
- NodeType* stmt = new(zone_) NodeType(isolate_, labels); \
+ NodeType* New##NodeType(ZoneStringList* labels, int pos) { \
+ NodeType* stmt = new(zone_) NodeType(isolate_, labels, pos); \
VISIT_AND_RETURN(NodeType, stmt); \
}
STATEMENT_WITH_LABELS(DoWhileStatement)
@@ -2971,14 +2960,15 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
#undef STATEMENT_WITH_LABELS
ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
- ZoneStringList* labels) {
+ ZoneStringList* labels,
+ int pos) {
switch (visit_mode) {
case ForEachStatement::ENUMERATE: {
- ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels);
+ ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels, pos);
VISIT_AND_RETURN(ForInStatement, stmt);
}
case ForEachStatement::ITERATE: {
- ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels);
+ ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels, pos);
VISIT_AND_RETURN(ForOfStatement, stmt);
}
}
@@ -2986,44 +2976,47 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
return NULL;
}
- ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) {
- ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body);
+ ModuleStatement* NewModuleStatement(
+ VariableProxy* proxy, Block* body, int pos) {
+ ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body, pos);
VISIT_AND_RETURN(ModuleStatement, stmt)
}
- ExpressionStatement* NewExpressionStatement(Expression* expression) {
- ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression);
+ ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
+ ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression, pos);
VISIT_AND_RETURN(ExpressionStatement, stmt)
}
- ContinueStatement* NewContinueStatement(IterationStatement* target) {
- ContinueStatement* stmt = new(zone_) ContinueStatement(target);
+ ContinueStatement* NewContinueStatement(IterationStatement* target, int pos) {
+ ContinueStatement* stmt = new(zone_) ContinueStatement(target, pos);
VISIT_AND_RETURN(ContinueStatement, stmt)
}
- BreakStatement* NewBreakStatement(BreakableStatement* target) {
- BreakStatement* stmt = new(zone_) BreakStatement(target);
+ BreakStatement* NewBreakStatement(BreakableStatement* target, int pos) {
+ BreakStatement* stmt = new(zone_) BreakStatement(target, pos);
VISIT_AND_RETURN(BreakStatement, stmt)
}
- ReturnStatement* NewReturnStatement(Expression* expression) {
- ReturnStatement* stmt = new(zone_) ReturnStatement(expression);
+ ReturnStatement* NewReturnStatement(Expression* expression, int pos) {
+ ReturnStatement* stmt = new(zone_) ReturnStatement(expression, pos);
VISIT_AND_RETURN(ReturnStatement, stmt)
}
WithStatement* NewWithStatement(Scope* scope,
Expression* expression,
- Statement* statement) {
+ Statement* statement,
+ int pos) {
WithStatement* stmt = new(zone_) WithStatement(
- scope, expression, statement);
+ scope, expression, statement, pos);
VISIT_AND_RETURN(WithStatement, stmt)
}
IfStatement* NewIfStatement(Expression* condition,
Statement* then_statement,
- Statement* else_statement) {
+ Statement* else_statement,
+ int pos) {
IfStatement* stmt = new(zone_) IfStatement(
- isolate_, condition, then_statement, else_statement);
+ isolate_, condition, then_statement, else_statement, pos);
VISIT_AND_RETURN(IfStatement, stmt)
}
@@ -3031,36 +3024,45 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Block* try_block,
Scope* scope,
Variable* variable,
- Block* catch_block) {
+ Block* catch_block,
+ int pos) {
TryCatchStatement* stmt = new(zone_) TryCatchStatement(
- index, try_block, scope, variable, catch_block);
+ index, try_block, scope, variable, catch_block, pos);
VISIT_AND_RETURN(TryCatchStatement, stmt)
}
TryFinallyStatement* NewTryFinallyStatement(int index,
Block* try_block,
- Block* finally_block) {
+ Block* finally_block,
+ int pos) {
TryFinallyStatement* stmt =
- new(zone_) TryFinallyStatement(index, try_block, finally_block);
+ new(zone_) TryFinallyStatement(index, try_block, finally_block, pos);
VISIT_AND_RETURN(TryFinallyStatement, stmt)
}
- DebuggerStatement* NewDebuggerStatement() {
- DebuggerStatement* stmt = new(zone_) DebuggerStatement();
+ DebuggerStatement* NewDebuggerStatement(int pos) {
+ DebuggerStatement* stmt = new(zone_) DebuggerStatement(pos);
VISIT_AND_RETURN(DebuggerStatement, stmt)
}
- EmptyStatement* NewEmptyStatement() {
- return new(zone_) EmptyStatement();
+ EmptyStatement* NewEmptyStatement(int pos) {
+ return new(zone_) EmptyStatement(pos);
+ }
+
+ CaseClause* NewCaseClause(
+ Expression* label, ZoneList<Statement*>* statements, int pos) {
+ CaseClause* clause =
+ new(zone_) CaseClause(isolate_, label, statements, pos);
+ VISIT_AND_RETURN(CaseClause, clause)
}
- Literal* NewLiteral(Handle<Object> handle) {
- Literal* lit = new(zone_) Literal(isolate_, handle);
+ Literal* NewLiteral(Handle<Object> handle, int pos) {
+ Literal* lit = new(zone_) Literal(isolate_, handle, pos);
VISIT_AND_RETURN(Literal, lit)
}
- Literal* NewNumberLiteral(double number) {
- return NewLiteral(isolate_->factory()->NewNumber(number, TENURED));
+ Literal* NewNumberLiteral(double number, int pos) {
+ return NewLiteral(isolate_->factory()->NewNumber(number, TENURED), pos);
}
ObjectLiteral* NewObjectLiteral(
@@ -3071,26 +3073,29 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
bool fast_elements,
int depth,
bool may_store_doubles,
- bool has_function) {
+ bool has_function,
+ int pos) {
ObjectLiteral* lit = new(zone_) ObjectLiteral(
isolate_, constant_properties, properties, literal_index,
- is_simple, fast_elements, depth, may_store_doubles, has_function);
+ is_simple, fast_elements, depth, may_store_doubles, has_function, pos);
VISIT_AND_RETURN(ObjectLiteral, lit)
}
ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter,
- FunctionLiteral* value) {
+ FunctionLiteral* value,
+ int pos) {
ObjectLiteral::Property* prop =
new(zone_) ObjectLiteral::Property(is_getter, value);
- prop->set_key(NewLiteral(value->name()));
+ prop->set_key(NewLiteral(value->name(), pos));
return prop; // Not an AST node, will not be visited.
}
RegExpLiteral* NewRegExpLiteral(Handle<String> pattern,
Handle<String> flags,
- int literal_index) {
+ int literal_index,
+ int pos) {
RegExpLiteral* lit =
- new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index);
+ new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index, pos);
VISIT_AND_RETURN(RegExpLiteral, lit);
}
@@ -3098,14 +3103,17 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
ZoneList<Expression*>* values,
int literal_index,
bool is_simple,
- int depth) {
+ int depth,
+ int pos) {
ArrayLiteral* lit = new(zone_) ArrayLiteral(
- isolate_, constant_elements, values, literal_index, is_simple, depth);
+ isolate_, constant_elements, values, literal_index, is_simple,
+ depth, pos);
VISIT_AND_RETURN(ArrayLiteral, lit)
}
- VariableProxy* NewVariableProxy(Variable* var) {
- VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var);
+ VariableProxy* NewVariableProxy(Variable* var,
+ int pos = RelocInfo::kNoPosition) {
+ VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var, pos);
VISIT_AND_RETURN(VariableProxy, proxy)
}
@@ -3139,9 +3147,10 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
CallRuntime* NewCallRuntime(Handle<String> name,
const Runtime::Function* function,
- ZoneList<Expression*>* arguments) {
+ ZoneList<Expression*>* arguments,
+ int pos) {
CallRuntime* call =
- new(zone_) CallRuntime(isolate_, name, function, arguments);
+ new(zone_) CallRuntime(isolate_, name, function, arguments, pos);
VISIT_AND_RETURN(CallRuntime, call)
}
@@ -3183,11 +3192,9 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Conditional* NewConditional(Expression* condition,
Expression* then_expression,
Expression* else_expression,
- int then_expression_position,
- int else_expression_position) {
+ int position) {
Conditional* cond = new(zone_) Conditional(
- isolate_, condition, then_expression, else_expression,
- then_expression_position, else_expression_position);
+ isolate_, condition, then_expression, else_expression, position);
VISIT_AND_RETURN(Conditional, cond)
}
@@ -3227,12 +3234,13 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
FunctionLiteral::FunctionType function_type,
FunctionLiteral::IsFunctionFlag is_function,
FunctionLiteral::IsParenthesizedFlag is_parenthesized,
- FunctionLiteral::IsGeneratorFlag is_generator) {
+ FunctionLiteral::IsGeneratorFlag is_generator,
+ int position) {
FunctionLiteral* lit = new(zone_) FunctionLiteral(
isolate_, name, scope, body,
materialized_literal_count, expected_property_count, handler_count,
parameter_count, function_type, has_duplicate_parameters, is_function,
- is_parenthesized, is_generator);
+ is_parenthesized, is_generator, position);
// Top-level literal doesn't count for the AST's properties.
if (is_function == FunctionLiteral::kIsFunction) {
visitor_.VisitFunctionLiteral(lit);
@@ -3240,15 +3248,15 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
return lit;
}
- SharedFunctionInfoLiteral* NewSharedFunctionInfoLiteral(
- Handle<SharedFunctionInfo> shared_function_info) {
- SharedFunctionInfoLiteral* lit =
- new(zone_) SharedFunctionInfoLiteral(isolate_, shared_function_info);
- VISIT_AND_RETURN(SharedFunctionInfoLiteral, lit)
+ NativeFunctionLiteral* NewNativeFunctionLiteral(
+ Handle<String> name, v8::Extension* extension, int pos) {
+ NativeFunctionLiteral* lit =
+ new(zone_) NativeFunctionLiteral(isolate_, name, extension, pos);
+ VISIT_AND_RETURN(NativeFunctionLiteral, lit)
}
- ThisFunction* NewThisFunction() {
- ThisFunction* fun = new(zone_) ThisFunction(isolate_);
+ ThisFunction* NewThisFunction(int pos) {
+ ThisFunction* fun = new(zone_) ThisFunction(isolate_, pos);
VISIT_AND_RETURN(ThisFunction, fun)
}
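
The pattern running through the whole AstNodeFactory hunk above is mechanical: every factory method gains an explicit source position, with RelocInfo::kNoPosition as the default for nodes the parser synthesizes rather than reads from the source. A minimal sketch of the idiom, using invented stand-in names rather than V8's real classes:

// Sketch only: AstNode, ReturnStmt and Factory are illustrative
// stand-ins for V8's AST classes, not the real declarations.
static const int kNoPosition = -1;  // stand-in for RelocInfo::kNoPosition

class AstNode {
 public:
  explicit AstNode(int pos) : pos_(pos) {}
  virtual ~AstNode() {}
  int position() const { return pos_; }  // every node can report its origin
 private:
  int pos_;  // character offset into the script source
};

class ReturnStmt : public AstNode {
 public:
  ReturnStmt(AstNode* expr, int pos) : AstNode(pos), expr_(expr) {}
 private:
  AstNode* expr_;
};

class Factory {
 public:
  // Parsed nodes pass the token position; synthesized nodes (e.g. the
  // implicit 'return undefined' at the end of a function) may omit it.
  ReturnStmt* NewReturnStatement(AstNode* expr, int pos = kNoPosition) {
    return new ReturnStmt(expr, pos);
  }
};
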
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 0756aefb0b..234a2118bd 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -824,7 +824,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
// work in the snapshot case is done in HookUpInnerGlobal.
void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function) {
- // --- G l o b a l C o n t e x t ---
+ // --- N a t i v e C o n t e x t ---
// Use the empty function as closure (no scope info).
native_context()->set_closure(*empty_function);
native_context()->set_previous(NULL);
@@ -1043,7 +1043,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
{ // -- J S O N
- Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
+ Handle<String> name = factory->InternalizeUtf8String("JSON");
Handle<JSFunction> cons = factory->NewFunction(name,
factory->the_hole_value());
JSFunction::SetInstancePrototype(cons,
@@ -2067,6 +2067,11 @@ bool Genesis::InstallExperimentalNatives() {
"native harmony-array.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
+ if (FLAG_harmony_maths &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native harmony-math.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
}
InstallExperimentalNativeFunctions();
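
The JSON change above swaps NewStringFromAscii for InternalizeUtf8String. An internalized string is the canonical copy kept in the isolate's string table, so every lookup of a name like "JSON" yields the same object and comparisons can be by identity rather than by contents. The idea in miniature, using the C++ standard library as a stand-in for V8's string table:

#include <cassert>
#include <string>
#include <unordered_set>

// Toy intern table: equal contents always map to one canonical object.
const std::string* Internalize(const std::string& s) {
  static std::unordered_set<std::string> table;
  return &*table.insert(s).first;  // element pointers stay stable on rehash
}

int main() {
  const std::string* a = Internalize("JSON");
  const std::string* b = Internalize("JSON");
  assert(a == b);  // identity comparison suffices after interning
}
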
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 9290852dc9..b614904c9f 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -195,79 +195,6 @@ BUILTIN(EmptyFunction) {
}
-static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
- Isolate* isolate,
- JSFunction* constructor) {
- ASSERT(args->length() >= 1);
- Heap* heap = isolate->heap();
- isolate->counters()->array_function_runtime()->Increment();
-
- JSArray* array;
- if (CalledAsConstructor(isolate)) {
- array = JSArray::cast((*args)[0]);
- // Initialize elements and length in case later allocations fail so that the
- // array object is initialized in a valid state.
- MaybeObject* maybe_array = array->Initialize(0);
- if (maybe_array->IsFailure()) return maybe_array;
-
- AllocationMemento* memento = AllocationMemento::FindForJSObject(array);
- if (memento != NULL && memento->IsValid()) {
- AllocationSite* site = memento->GetAllocationSite();
- ElementsKind to_kind = site->GetElementsKind();
- if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
- to_kind)) {
- // We have advice that we should change the elements kind
- if (FLAG_trace_track_allocation_sites) {
- PrintF("AllocationSite: pre-transitioning array %p(%s->%s)\n",
- reinterpret_cast<void*>(array),
- ElementsKindToString(array->GetElementsKind()),
- ElementsKindToString(to_kind));
- }
-
- maybe_array = array->TransitionElementsKind(to_kind);
- if (maybe_array->IsFailure()) return maybe_array;
- }
- }
-
- if (!FLAG_smi_only_arrays) {
- Context* native_context = isolate->context()->native_context();
- if (array->GetElementsKind() == GetInitialFastElementsKind() &&
- !native_context->js_array_maps()->IsUndefined()) {
- FixedArray* map_array =
- FixedArray::cast(native_context->js_array_maps());
- array->set_map(Map::cast(map_array->
- get(TERMINAL_FAST_ELEMENTS_KIND)));
- }
- }
- } else {
- // Allocate the JS Array
- MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
- if (!maybe_obj->To(&array)) return maybe_obj;
- }
-
- Arguments adjusted_arguments(args->length() - 1, args->arguments() - 1);
- ASSERT(adjusted_arguments.length() < 1 ||
- adjusted_arguments[0] == (*args)[1]);
- return ArrayConstructInitializeElements(array, &adjusted_arguments);
-}
-
-
-BUILTIN(InternalArrayCodeGeneric) {
- return ArrayCodeGenericCommon(
- &args,
- isolate,
- isolate->context()->native_context()->internal_array_function());
-}
-
-
-BUILTIN(ArrayCodeGeneric) {
- return ArrayCodeGenericCommon(
- &args,
- isolate,
- isolate->context()->native_context()->array_function());
-}
-
-
static void MoveDoubleElements(FixedDoubleArray* dst,
int dst_index,
FixedDoubleArray* src,
@@ -346,10 +273,20 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
}
- HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
- elms->address() + size_delta));
- return FixedArrayBase::cast(HeapObject::FromAddress(
- elms->address() + to_trim * entry_size));
+ FixedArrayBase* new_elms = FixedArrayBase::cast(HeapObject::FromAddress(
+ elms->address() + size_delta));
+ HeapProfiler* profiler = heap->isolate()->heap_profiler();
+ if (profiler->is_profiling()) {
+ profiler->ObjectMoveEvent(elms->address(),
+ new_elms->address(),
+ new_elms->Size());
+ if (profiler->is_tracking_allocations()) {
+ // Report filler object as a new allocation.
+ // Otherwise it will become an untracked object.
+ profiler->NewObjectEvent(elms->address(), elms->Size());
+ }
+ }
+ return new_elms;
}
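
Left-trimming moves the start of the array forward in place: the live object now begins at new_elms and the vacated prefix is overwritten with a filler. The hunk above therefore tells the profiler two things, a move for the surviving object and a fresh allocation for the filler, so address-keyed tracking never sees an unknown range. A hedged sketch of that bookkeeping (ToyProfiler and ReportLeftTrim are invented for illustration):

#include <cstdio>

struct ToyProfiler {
  bool is_profiling = true;
  bool is_tracking_allocations = true;
  void ObjectMoveEvent(void* from, void* to, int size) {
    std::printf("move %p -> %p (%d bytes)\n", from, to, size);
  }
  void NewObjectEvent(void* at, int size) {
    std::printf("new  %p (%d bytes)\n", at, size);
  }
};

// After trimming 'delta' bytes off the front of an object at 'old_addr':
void ReportLeftTrim(ToyProfiler* p, char* old_addr, int old_size, int delta) {
  char* new_addr = old_addr + delta;
  if (!p->is_profiling) return;
  p->ObjectMoveEvent(old_addr, new_addr, old_size - delta);
  if (p->is_tracking_allocations) {
    // Report the filler as a new allocation; otherwise the old address
    // range would become an untracked object.
    p->NewObjectEvent(old_addr, delta);
  }
}
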
@@ -1392,7 +1329,8 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) {
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
- LoadStubCompiler::GenerateLoadViaGetter(masm, Handle<JSFunction>());
+ LoadStubCompiler::GenerateLoadViaGetter(
+ masm, LoadStubCompiler::registers()[0], Handle<JSFunction>());
}
@@ -1451,6 +1389,11 @@ static void Generate_StoreIC_Slow(MacroAssembler* masm) {
}
+static void Generate_StoreIC_Slow_Strict(MacroAssembler* masm) {
+ StoreIC::GenerateSlow(masm);
+}
+
+
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
}
@@ -1546,6 +1489,11 @@ static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
}
+static void Generate_KeyedStoreIC_Slow_Strict(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateSlow(masm);
+}
+
+
static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
KeyedStoreIC::GenerateInitialize(masm);
}
@@ -1728,8 +1676,19 @@ void Builtins::InitBuiltinFunctionTable() {
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
+#define DEF_FUNCTION_PTR_H(aname, kind, extra) \
+ functions->generator = FUNCTION_ADDR(Generate_##aname); \
+ functions->c_code = NULL; \
+ functions->s_name = #aname; \
+ functions->name = k##aname; \
+ functions->flags = Code::ComputeFlags( \
+ Code::HANDLER, MONOMORPHIC, extra, Code::NORMAL, Code::kind); \
+ functions->extra_args = NO_EXTRA_ARGUMENTS; \
+ ++functions;
+
BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
+ BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
#undef DEF_FUNCTION_PTR_C
@@ -1854,8 +1813,15 @@ Handle<Code> Builtins::name() { \
reinterpret_cast<Code**>(builtin_address(k##name)); \
return Handle<Code>(code_address); \
}
+#define DEFINE_BUILTIN_ACCESSOR_H(name, kind, extra) \
+Handle<Code> Builtins::name() { \
+ Code** code_address = \
+ reinterpret_cast<Code**>(builtin_address(k##name)); \
+ return Handle<Code>(code_address); \
+}
BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
+BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
#undef DEFINE_BUILTIN_ACCESSOR_C
#undef DEFINE_BUILTIN_ACCESSOR_A
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index c712f1ee02..9b589d843d 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -50,6 +50,10 @@ enum BuiltinExtraArguments {
#define CODE_AGE_LIST(V) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
+#define CODE_AGE_LIST_WITH_NO_AGE(V) \
+ V(NoAge) \
+ CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
+
#define DECLARE_CODE_AGE_BUILTIN(C, V) \
V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
UNINITIALIZED, Code::kNoExtraICState) \
@@ -63,9 +67,6 @@ enum BuiltinExtraArguments {
\
V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
\
- V(InternalArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
- V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
- \
V(ArrayPush, NO_EXTRA_ARGUMENTS) \
V(ArrayPop, NO_EXTRA_ARGUMENTS) \
V(ArrayShift, NO_EXTRA_ARGUMENTS) \
@@ -111,8 +112,6 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(NotifyOSR, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -120,30 +119,20 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(KeyedLoadIC_Slow, STUB, MONOMORPHIC, \
- Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(StoreIC_Slow, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(KeyedStoreIC_Slow, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
- V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
- V(LoadIC_Slow, STUB, MONOMORPHIC, \
- Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -162,8 +151,6 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(StoreIC_PreMonomorphic, STORE_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
- V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Generic, STORE_IC, GENERIC, \
@@ -176,8 +163,6 @@ enum BuiltinExtraArguments {
kStrictMode) \
V(StoreIC_PreMonomorphic_Strict, STORE_IC, PREMONOMORPHIC, \
kStrictMode) \
- V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
- kStrictMode) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
V(StoreIC_GlobalProxy_Strict, STORE_IC, GENERIC, \
@@ -219,10 +204,29 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(InterruptCheck, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(StackCheck, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ \
+ V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
+// Define list of builtin handlers implemented in assembly.
+#define BUILTIN_LIST_H(V) \
+ V(LoadIC_Slow, LOAD_IC, Code::kNoExtraICState) \
+ V(KeyedLoadIC_Slow, KEYED_LOAD_IC, Code::kNoExtraICState) \
+ V(StoreIC_Slow, STORE_IC, Code::kNoExtraICState) \
+ V(StoreIC_Slow_Strict, STORE_IC, kStrictMode) \
+ V(KeyedStoreIC_Slow, KEYED_STORE_IC, Code::kNoExtraICState)\
+ V(KeyedStoreIC_Slow_Strict, KEYED_STORE_IC, kStrictMode) \
+ V(LoadIC_Normal, LOAD_IC, Code::kNoExtraICState) \
+ V(StoreIC_Normal, STORE_IC, Code::kNoExtraICState) \
+ V(StoreIC_Normal_Strict, STORE_IC, kStrictMode)
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
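
BUILTIN_LIST_H is a classic X-macro: the list is written once and re-expanded with a different per-entry macro at each site, which is exactly what the surrounding hunks do with DEF_ENUM_H, DEF_FUNCTION_PTR_H and DECLARE_BUILTIN_ACCESSOR_H. The mechanism reduced to a toy (names invented, not the real builtins):

#include <cstdio>

// Declare the list once; V is supplied by each expansion site.
#define TOY_BUILTIN_LIST(V) \
  V(LoadSlow)               \
  V(StoreSlow)

// Expansion 1: an enum of builtin ids.
enum ToyBuiltinId {
#define DEF_ENUM(name) k##name,
  TOY_BUILTIN_LIST(DEF_ENUM)
#undef DEF_ENUM
  kToyBuiltinCount
};

// Expansion 2: a parallel table of printable names, guaranteed to stay
// in sync with the enum because both come from the same list.
static const char* kToyBuiltinNames[] = {
#define DEF_NAME(name) #name,
  TOY_BUILTIN_LIST(DEF_NAME)
#undef DEF_NAME
};

int main() {
  for (int i = 0; i < kToyBuiltinCount; ++i)
    std::printf("%d: %s\n", i, kToyBuiltinNames[i]);
}
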
@@ -310,8 +314,10 @@ class Builtins {
enum Name {
#define DEF_ENUM_C(name, ignore) k##name,
#define DEF_ENUM_A(name, kind, state, extra) k##name,
+#define DEF_ENUM_H(name, kind, extra) k##name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
+ BUILTIN_LIST_H(DEF_ENUM_H)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
#undef DEF_ENUM_C
#undef DEF_ENUM_A
@@ -335,8 +341,10 @@ class Builtins {
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> name();
+#define DECLARE_BUILTIN_ACCESSOR_H(name, kind, extra) Handle<Code> name();
BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
+ BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
#undef DECLARE_BUILTIN_ACCESSOR_C
#undef DECLARE_BUILTIN_ACCESSOR_A
@@ -391,7 +399,6 @@ class Builtins {
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
- static void Generate_NotifyOSR(MacroAssembler* masm);
static void Generate_NotifyStubFailure(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
@@ -403,7 +410,7 @@ class Builtins {
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
-
+ static void Generate_OsrAfterStackCheck(MacroAssembler* masm);
static void Generate_InterruptCheck(MacroAssembler* masm);
static void Generate_StackCheck(MacroAssembler* masm);
@@ -415,6 +422,9 @@ class Builtins {
CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
+ static void Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm);
+ static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm);
+
static void InitBuiltinFunctionTable();
bool initialized_;
diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc
index 7108d18892..e08cd7c685 100644
--- a/deps/v8/src/checks.cc
+++ b/deps/v8/src/checks.cc
@@ -25,11 +25,48 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdarg.h>
+#include "checks.h"
-#include "v8.h"
+#if V8_LIBC_GLIBC || V8_OS_BSD
+# include <cxxabi.h>
+# include <execinfo.h>
+#endif // V8_LIBC_GLIBC || V8_OS_BSD
+#include <stdio.h>
#include "platform.h"
+#include "v8.h"
+
+
+// Attempts to dump a backtrace (if supported).
+static V8_INLINE void DumpBacktrace() {
+#if V8_LIBC_GLIBC || V8_OS_BSD
+ void* trace[100];
+ int size = backtrace(trace, ARRAY_SIZE(trace));
+ char** symbols = backtrace_symbols(trace, size);
+ i::OS::PrintError("\n==== C stack trace ===============================\n\n");
+ if (size == 0) {
+ i::OS::PrintError("(empty)\n");
+ } else if (symbols == NULL) {
+ i::OS::PrintError("(no symbols)\n");
+ } else {
+ for (int i = 1; i < size; ++i) {
+ i::OS::PrintError("%2d: ", i);
+ char mangled[201];
+ if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
+ int status;
+ size_t length;
+ char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
+ i::OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
+ free(demangled);
+ } else {
+ i::OS::PrintError("??\n");
+ }
+ }
+ }
+ free(symbols);
+#endif // V8_LIBC_GLIBC || V8_OS_BSD
+}
+
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
@@ -43,7 +80,8 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
i::OS::VPrintError(format, arguments);
va_end(arguments);
i::OS::PrintError("\n#\n");
- i::OS::DumpBacktrace();
+ DumpBacktrace();
+ fflush(stderr);
i::OS::Abort();
}
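
On glibc, backtrace_symbols() formats each frame roughly as "module(mangled_name+0x1b) [0xaddress]". The sscanf format in DumpBacktrace skips everything up to and including the '(' and captures at most 200 characters that are neither '+' nor ')', i.e. the mangled name, which abi::__cxa_demangle then turns back into a readable signature. A standalone sketch, assuming GCC/glibc; the frame string is a made-up example, not captured output:

#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main() {
  const char* frame = "./d8(_ZN2v88internal4HeapEv+0x1b) [0x7f3a12345678]";
  char mangled[201];
  // %*[^(] skips the module path, %*[(] consumes the '(', then
  // %200[^)+] captures the mangled name up to '+' or ')'.
  if (std::sscanf(frame, "%*[^(]%*[(]%200[^)+]", mangled) == 1) {
    int status = 0;
    size_t length = 0;
    char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
    std::printf("%s\n", demangled != NULL ? demangled : mangled);
    std::free(demangled);
  }
  return 0;
}
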
@@ -91,8 +129,6 @@ void API_Fatal(const char* location, const char* format, ...) {
namespace v8 { namespace internal {
- bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }
-
intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
} } // namespace v8::internal
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index f5c5f232bd..9d2db28d8f 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -272,7 +272,24 @@ template <int> class StaticAssertionHelper { };
#endif
+#ifdef DEBUG
+#ifndef OPTIMIZED_DEBUG
+#define ENABLE_SLOW_ASSERTS 1
+#endif
+#endif
+
+namespace v8 {
+namespace internal {
+#ifdef ENABLE_SLOW_ASSERTS
+#define SLOW_ASSERT(condition) \
+ CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
extern bool FLAG_enable_slow_asserts;
+#else
+#define SLOW_ASSERT(condition) ((void) 0)
+const bool FLAG_enable_slow_asserts = false;
+#endif
+} // namespace internal
+} // namespace v8
// The ASSERT macro is equivalent to CHECK except that it only
@@ -285,7 +302,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
@@ -294,7 +310,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) ((void) 0)
#define ASSERT_LT(v1, v2) ((void) 0)
#define ASSERT_LE(v1, v2) ((void) 0)
-#define SLOW_ASSERT(condition) ((void) 0)
#endif
// Static asserts has no impact on runtime performance, so they can be
// safely enabled in release mode. Moreover, the ((void) 0) expression
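
The checks.h reshuffle makes SLOW_ASSERT self-contained: debug builds consult the runtime flag before evaluating the (possibly expensive) condition, while release builds expand the macro to ((void) 0) and turn FLAG_enable_slow_asserts into a compile-time false constant, so any code guarded by it folds away. The shape of that idiom in a toy:

#include <cassert>

#ifdef ENABLE_SLOW_CHECKS
bool g_slow_checks_enabled = true;         // would be wired to a runtime flag
#define SLOW_CHECK(cond) assert(!g_slow_checks_enabled || (cond))
#else
const bool g_slow_checks_enabled = false;  // compile-time constant in release
#define SLOW_CHECK(cond) ((void) 0)        // condition is never evaluated
#endif

bool ExpensiveInvariantHolds() { return true; }  // stand-in for an O(n) check

void TouchHeap() {
  SLOW_CHECK(ExpensiveInvariantHolds());
  if (g_slow_checks_enabled) {
    // Extra verification; with the const false above, the compiler can
    // prove this branch dead and drop it entirely.
  }
}

int main() { TouchHeap(); }
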
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 23d4269c84..dfa5ecd8cf 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -146,14 +146,10 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
int param_count = descriptor_->register_param_count_;
HEnvironment* start_environment = graph()->start_environment();
HBasicBlock* next_block = CreateBasicBlock(start_environment);
- current_block()->Goto(next_block);
+ Goto(next_block);
next_block->SetJoinId(BailoutId::StubEntry());
set_current_block(next_block);
- HConstant* undefined_constant =
- Add<HConstant>(isolate()->factory()->undefined_value());
- graph()->set_undefined_constant(undefined_constant);
-
for (int i = 0; i < param_count; ++i) {
HParameter* param =
Add<HParameter>(i, HParameter::REGISTER_PARAMETER);
@@ -162,7 +158,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
}
HInstruction* stack_parameter_count;
- if (descriptor_->stack_parameter_count_ != NULL) {
+ if (descriptor_->stack_parameter_count_.is_valid()) {
ASSERT(descriptor_->environment_length() == (param_count + 1));
stack_parameter_count = New<HParameter>(param_count,
HParameter::REGISTER_PARAMETER,
@@ -178,8 +174,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
arguments_length_ = graph()->GetConstant0();
}
- context_ = New<HContext>();
- AddInstruction(context_);
+ context_ = Add<HContext>();
start_environment->BindContext(context_);
Add<HSimulate>(BailoutId::StubEntry());
@@ -207,8 +202,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
if (current_block() != NULL) {
HReturn* hreturn_instruction = New<HReturn>(return_value,
stack_pop_count);
- current_block()->Finish(hreturn_instruction);
- set_current_block(NULL);
+ FinishCurrentBlock(hreturn_instruction);
}
return true;
}
@@ -298,12 +292,21 @@ static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) {
// the runtime that is significantly faster than using the standard
// stub-failure deopt mechanism.
if (stub->IsUninitialized() && descriptor->has_miss_handler()) {
- ASSERT(descriptor->stack_parameter_count_ == NULL);
+ ASSERT(!descriptor->stack_parameter_count_.is_valid());
return stub->GenerateLightweightMissCode(isolate);
}
+ ElapsedTimer timer;
+ if (FLAG_profile_hydrogen_code_stub_compilation) {
+ timer.Start();
+ }
CodeStubGraphBuilder<Stub> builder(isolate, stub);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- return chunk->Codegen();
+ Handle<Code> code = chunk->Codegen();
+ if (FLAG_profile_hydrogen_code_stub_compilation) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Lazy compilation of %s took %0.3f ms]\n", *stub->GetName(), ms);
+ }
+ return code;
}
@@ -339,6 +342,19 @@ Handle<Code> ToNumberStub::GenerateCode(Isolate* isolate) {
template <>
+HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
+ info()->MarkAsSavesCallerDoubles();
+ HValue* number = GetParameter(NumberToStringStub::kNumber);
+ return BuildNumberToString(number, handle(Type::Number(), isolate()));
+}
+
+
+Handle<Code> NumberToStringStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
Factory* factory = isolate()->factory();
HValue* undefined = graph()->GetConstantUndefined();
@@ -355,42 +371,48 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
undefined);
checker.Then();
- HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
+ HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kTransitionInfoOffset);
HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
+ HValue* push_value;
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
if_fixed_cow.Then();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_ELEMENTS,
- 0/*copy-on-write*/));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_ELEMENTS,
+ 0/*copy-on-write*/);
+ environment()->Push(push_value);
if_fixed_cow.Else();
IfBuilder if_fixed(this);
if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
if_fixed.Then();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_ELEMENTS,
- length));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_ELEMENTS,
+ length);
+ environment()->Push(push_value);
if_fixed.Else();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_DOUBLE_ELEMENTS,
- length));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_DOUBLE_ELEMENTS,
+ length);
+ environment()->Push(push_value);
} else {
ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- elements_kind,
- length));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ elements_kind,
+ length);
+ environment()->Push(push_value);
}
checker.ElseDeopt("Uninitialized boilerplate literals");
@@ -407,23 +429,33 @@ Handle<Code> FastCloneShallowArrayStub::GenerateCode(Isolate* isolate) {
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
- Zone* zone = this->zone();
HValue* undefined = graph()->GetConstantUndefined();
- HInstruction* boilerplate = Add<HLoadKeyed>(GetParameter(0),
- GetParameter(1),
- static_cast<HValue*>(NULL),
- FAST_ELEMENTS);
+ HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0),
+ GetParameter(1),
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS);
IfBuilder checker(this);
- checker.IfNot<HCompareObjectEqAndBranch, HValue*>(boilerplate,
+ checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
undefined);
checker.And();
+ HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kTransitionInfoOffset);
+ HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
+
int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
- HValue* boilerplate_size =
- AddInstruction(new(zone) HInstanceSize(boilerplate));
- HValue* size_in_words = Add<HConstant>(size >> kPointerSizeLog2);
+ int object_size = size;
+ if (FLAG_allocation_site_pretenuring) {
+ size += AllocationMemento::kSize;
+ }
+
+ HValue* boilerplate_map = Add<HLoadNamedField>(
+ boilerplate, HObjectAccess::ForMap());
+ HValue* boilerplate_size = Add<HLoadNamedField>(
+ boilerplate_map, HObjectAccess::ForMapInstanceSize());
+ HValue* size_in_words = Add<HConstant>(object_size >> kPointerSizeLog2);
checker.If<HCompareNumericAndBranch>(boilerplate_size,
size_in_words, Token::EQ);
checker.Then();
@@ -433,12 +465,17 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HInstruction* object = Add<HAllocate>(size_in_bytes, HType::JSObject(),
isolate()->heap()->GetPretenureMode(), JS_OBJECT_TYPE);
- for (int i = 0; i < size; i += kPointerSize) {
+ for (int i = 0; i < object_size; i += kPointerSize) {
HObjectAccess access = HObjectAccess::ForJSObjectOffset(i);
Add<HStoreNamedField>(object, access,
Add<HLoadNamedField>(boilerplate, access));
}
+ ASSERT(FLAG_allocation_site_pretenuring || (size == object_size));
+ if (FLAG_allocation_site_pretenuring) {
+ BuildCreateAllocationMemento(object, object_size, allocation_site);
+ }
+
environment()->Push(object);
checker.ElseDeopt("Uninitialized boilerplate in fast clone");
checker.End();
@@ -459,24 +496,39 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
JS_OBJECT_TYPE);
// Store the map
- Handle<Map> allocation_site_map(isolate()->heap()->allocation_site_map(),
- isolate());
+ Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
AddStoreMapConstant(object, allocation_site_map);
// Store the payload (smi elements kind)
HValue* initial_elements_kind = Add<HConstant>(GetInitialFastElementsKind());
Add<HStoreNamedField>(object,
- HObjectAccess::ForAllocationSiteTransitionInfo(),
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kTransitionInfoOffset),
initial_elements_kind);
+ // Unlike literals, constructed arrays don't have nested sites
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kNestedSiteOffset),
+ graph()->GetConstant0());
+
+ // Store an empty fixed array for the code dependency.
+ HConstant* empty_fixed_array =
+ Add<HConstant>(isolate()->factory()->empty_fixed_array());
+ HStoreNamedField* store = Add<HStoreNamedField>(
+ object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kDependentCodeOffset),
+ empty_fixed_array);
+
// Link the object to the allocation site list
HValue* site_list = Add<HConstant>(
ExternalReference::allocation_sites_list_address(isolate()));
HValue* site = Add<HLoadNamedField>(site_list,
HObjectAccess::ForAllocationSiteList());
- HStoreNamedField* store =
- Add<HStoreNamedField>(object, HObjectAccess::ForAllocationSiteWeakNext(),
- site);
+ store = Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(AllocationSite::kWeakNextOffset),
+ site);
store->SkipWriteBarrier();
Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
object);
@@ -519,7 +571,7 @@ HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
+ return AddLoadNamedField(GetParameter(0), access);
}
@@ -534,7 +586,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
+ return AddLoadNamedField(GetParameter(0), access);
}
@@ -640,14 +692,13 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
HValue* constant_zero = graph()->GetConstant0();
HInstruction* elements = Add<HArgumentsElements>(false);
- HInstruction* argument = AddInstruction(
- new(zone()) HAccessArgumentsAt(elements, constant_one, constant_zero));
+ HInstruction* argument = Add<HAccessArgumentsAt>(
+ elements, constant_one, constant_zero);
HConstant* max_alloc_length =
Add<HConstant>(JSObject::kInitialMaxFastElementArray);
const int initial_capacity = JSArray::kPreallocatedArrayElements;
- HConstant* initial_capacity_node = New<HConstant>(initial_capacity);
- AddInstruction(initial_capacity_node);
+ HConstant* initial_capacity_node = Add<HConstant>(initial_capacity);
HInstruction* checked_arg = Add<HBoundsCheck>(argument, max_alloc_length);
IfBuilder if_builder(this);
@@ -690,8 +741,8 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
HValue* start = graph()->GetConstant0();
HValue* key = builder.BeginBody(start, length, Token::LT);
HInstruction* argument_elements = Add<HArgumentsElements>(false);
- HInstruction* argument = AddInstruction(new(zone()) HAccessArgumentsAt(
- argument_elements, length, key));
+ HInstruction* argument = Add<HAccessArgumentsAt>(
+ argument_elements, length, key);
Add<HStoreKeyed>(elements, key, argument, kind);
builder.EndBody();
@@ -792,7 +843,7 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
HIfContinuation continuation;
Handle<Map> sentinel_map(isolate->heap()->meta_map());
Handle<Type> type = stub->GetType(isolate, sentinel_map);
- BuildCompareNil(GetParameter(0), type, RelocInfo::kNoPosition, &continuation);
+ BuildCompareNil(GetParameter(0), type, &continuation);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
if (continuation.IsFalseReachable()) {
@@ -812,6 +863,115 @@ Handle<Code> CompareNilICStub::GenerateCode(Isolate* isolate) {
template <>
+HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() {
+ BinaryOpStub* stub = casted_stub();
+ HValue* left = GetParameter(0);
+ HValue* right = GetParameter(1);
+
+ Handle<Type> left_type = stub->GetLeftType(isolate());
+ Handle<Type> right_type = stub->GetRightType(isolate());
+ Handle<Type> result_type = stub->GetResultType(isolate());
+
+ ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
+ (stub->HasSideEffects(isolate()) || !result_type->Is(Type::None())));
+
+ HValue* result = NULL;
+ if (stub->operation() == Token::ADD &&
+ (left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) &&
+ !left_type->Is(Type::String()) && !right_type->Is(Type::String())) {
+    // For the generic add stub, a fast case for string addition is
+    // performance critical.
+ if (left_type->Maybe(Type::String())) {
+ IfBuilder if_leftisstring(this);
+ if_leftisstring.If<HIsStringAndBranch>(left);
+ if_leftisstring.Then();
+ {
+ Push(AddInstruction(BuildBinaryOperation(
+ stub->operation(), left, right,
+ handle(Type::String(), isolate()), right_type,
+ result_type, stub->fixed_right_arg(), true)));
+ }
+ if_leftisstring.Else();
+ {
+ Push(AddInstruction(BuildBinaryOperation(
+ stub->operation(), left, right,
+ left_type, right_type, result_type,
+ stub->fixed_right_arg(), true)));
+ }
+ if_leftisstring.End();
+ result = Pop();
+ } else {
+ IfBuilder if_rightisstring(this);
+ if_rightisstring.If<HIsStringAndBranch>(right);
+ if_rightisstring.Then();
+ {
+ Push(AddInstruction(BuildBinaryOperation(
+ stub->operation(), left, right,
+ left_type, handle(Type::String(), isolate()),
+ result_type, stub->fixed_right_arg(), true)));
+ }
+ if_rightisstring.Else();
+ {
+ Push(AddInstruction(BuildBinaryOperation(
+ stub->operation(), left, right,
+ left_type, right_type, result_type,
+ stub->fixed_right_arg(), true)));
+ }
+ if_rightisstring.End();
+ result = Pop();
+ }
+ } else {
+ result = AddInstruction(BuildBinaryOperation(
+ stub->operation(), left, right,
+ left_type, right_type, result_type,
+ stub->fixed_right_arg(), true));
+ }
+
+ // If we encounter a generic argument, the number conversion is
+  // observable, so we cannot afford to bail out after the fact.
+ if (!stub->HasSideEffects(isolate())) {
+ if (result_type->Is(Type::Smi())) {
+ if (stub->operation() == Token::SHR) {
+        // TODO(olivf) Replace this with a SmiTagU instruction.
+        // 0x40000000: this number would convert to negative when interpreting
+        // the register as a signed value;
+ IfBuilder if_of(this);
+ if_of.IfNot<HCompareNumericAndBranch>(result,
+ Add<HConstant>(static_cast<int>(SmiValuesAre32Bits()
+ ? 0x80000000 : 0x40000000)), Token::EQ_STRICT);
+ if_of.Then();
+          if_of.ElseDeopt("UInt->Smi overflow");
+ if_of.End();
+ }
+ }
+ result = EnforceNumberType(result, result_type);
+ }
+
+ // Reuse the double box of one of the operands if we are allowed to (i.e.
+ // chained binops).
+ if (stub->CanReuseDoubleBox()) {
+ HValue* operand = (stub->mode() == OVERWRITE_LEFT) ? left : right;
+ IfBuilder if_heap_number(this);
+ if_heap_number.IfNot<HIsSmiAndBranch>(operand);
+ if_heap_number.Then();
+ Add<HStoreNamedField>(operand, HObjectAccess::ForHeapNumberValue(), result);
+ Push(operand);
+ if_heap_number.Else();
+ Push(result);
+ if_heap_number.End();
+ result = Pop();
+ }
+
+ return result;
+}
+
+
+Handle<Code> BinaryOpStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();
@@ -918,8 +1078,7 @@ void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
HValue* native_context,
HValue* code_object) {
Counters* counters = isolate()->counters();
- AddIncrementCounter(counters->fast_new_closure_install_optimized(),
- context());
+ AddIncrementCounter(counters->fast_new_closure_install_optimized());
// TODO(fschneider): Idea: store proper code pointers in the optimized code
// map and either unmangle them on marking or do nothing as the whole map is
@@ -967,7 +1126,7 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
}
is_optimized.Else();
{
- AddIncrementCounter(counters->fast_new_closure_try_optimized(), context());
+ AddIncrementCounter(counters->fast_new_closure_try_optimized());
// optimized_map points to fixed array of 3-element entries
// (native context, optimized code, literals).
// Map must never be empty, so check the first elements.
@@ -1012,8 +1171,8 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
}
restore_check.Else();
{
- HValue* keyed_minus = AddInstruction(HSub::New(zone(), context(), key,
- shared_function_entry_length));
+ HValue* keyed_minus = AddUncasted<HSub>(
+ key, shared_function_entry_length);
HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
IfBuilder done_check(this);
@@ -1022,8 +1181,8 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
done_check.Then();
{
// Hit: fetch the optimized code.
- HValue* keyed_plus = AddInstruction(HAdd::New(zone(), context(),
- keyed_minus, graph()->GetConstant1()));
+ HValue* keyed_plus = AddUncasted<HAdd>(
+ keyed_minus, graph()->GetConstant1());
HValue* code_object = Add<HLoadKeyed>(optimized_map,
keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
BuildInstallOptimizedCode(js_function, native_context, code_object);
@@ -1052,11 +1211,12 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
Add<HConstant>(factory->empty_fixed_array());
HValue* shared_info = GetParameter(0);
+ AddIncrementCounter(counters->fast_new_closure_total());
+
// Create a new closure from the given function info in new space
HValue* size = Add<HConstant>(JSFunction::kSize);
HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
NOT_TENURED, JS_FUNCTION_TYPE);
- AddIncrementCounter(counters->fast_new_closure_total(), context());
int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
casted_stub()->is_generator());
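
One detail worth isolating from the BinaryOpStub graph builder above: when the stub was created with OVERWRITE_LEFT or OVERWRITE_RIGHT (a chained expression owns its intermediate value), the result can be written into the doomed operand's existing heap-number box instead of allocating a new one, provided that operand is not a Smi and therefore actually has a box. The control shape in plain C++ (toy types; the real code emits Hydrogen instructions rather than running this directly):

struct Value { bool is_smi; double boxed; };

// Returns the object that ends up holding 'result': either the reusable
// operand, mutated in place, or a freshly allocated box.
Value* StoreResult(Value* reusable_operand, double result) {
  if (!reusable_operand->is_smi) {
    reusable_operand->boxed = result;  // overwrite payload, no allocation
    return reusable_operand;
  }
  return new Value{false, result};  // a Smi has no box to reuse
}
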
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index ace4af42a9..e68a5dd0c8 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -41,7 +41,7 @@ namespace internal {
CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
: register_param_count_(-1),
- stack_parameter_count_(NULL),
+ stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
register_params_(NULL),
@@ -129,6 +129,11 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
}
+void CodeStub::VerifyPlatformFeatures(Isolate* isolate) {
+ ASSERT(CpuFeatures::VerifyCrossCompiling());
+}
+
+
Handle<Code> CodeStub::GetCode(Isolate* isolate) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
@@ -137,9 +142,14 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
ASSERT(IsPregenerated(isolate) == code->is_pregenerated());
+ ASSERT(GetCodeKind() == code->kind());
return Handle<Code>(code);
}
+#ifdef DEBUG
+ VerifyPlatformFeatures(isolate);
+#endif
+
{
HandleScope scope(isolate);
@@ -203,119 +213,471 @@ void CodeStub::PrintName(StringStream* stream) {
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
+void BinaryOpStub::PrintBaseName(StringStream* stream) {
+ const char* op_name = Token::Name(op_);
+ const char* ovr = "";
+ if (mode_ == OVERWRITE_LEFT) ovr = "_ReuseLeft";
+ if (mode_ == OVERWRITE_RIGHT) ovr = "_ReuseRight";
+ stream->Add("BinaryOpStub_%s%s", op_name, ovr);
+}
+
- BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
- if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
- // The OddballStub handles a number and an oddball, not two oddballs.
- operands_type = BinaryOpIC::GENERIC;
+void BinaryOpStub::PrintState(StringStream* stream) {
+ stream->Add("(");
+ stream->Add(StateToName(left_state_));
+ stream->Add("*");
+ if (fixed_right_arg_.has_value) {
+ stream->Add("%d", fixed_right_arg_.value);
+ } else {
+ stream->Add(StateToName(right_state_));
}
- switch (operands_type) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
+ stream->Add("->");
+ stream->Add(StateToName(result_state_));
+ stream->Add(")");
+}
+
+
+Maybe<Handle<Object> > BinaryOpStub::Result(Handle<Object> left,
+ Handle<Object> right,
+ Isolate* isolate) {
+ Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
+ Builtins::JavaScript func = BinaryOpIC::TokenToJSBuiltin(op_);
+ Object* builtin = builtins->javascript_builtin(func);
+ Handle<JSFunction> builtin_function =
+ Handle<JSFunction>(JSFunction::cast(builtin), isolate);
+ bool caught_exception;
+ Handle<Object> result = Execution::Call(isolate, builtin_function, left,
+ 1, &right, &caught_exception);
+ return Maybe<Handle<Object> >(!caught_exception, result);
+}
+
+
+void BinaryOpStub::Initialize() {
+ fixed_right_arg_.has_value = false;
+ left_state_ = right_state_ = result_state_ = NONE;
+}
+
+
+void BinaryOpStub::Generate(Token::Value op,
+ State left,
+ State right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate) {
+ BinaryOpStub stub(INITIALIZED);
+ stub.op_ = op;
+ stub.left_state_ = left;
+ stub.right_state_ = right;
+ stub.result_state_ = result;
+ stub.mode_ = mode;
+ stub.GetCode(isolate);
+}
+
+
+void BinaryOpStub::Generate(Token::Value op,
+ State left,
+ int right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate) {
+ BinaryOpStub stub(INITIALIZED);
+ stub.op_ = op;
+ stub.left_state_ = left;
+ stub.fixed_right_arg_.has_value = true;
+ stub.fixed_right_arg_.value = right;
+ stub.right_state_ = SMI;
+ stub.result_state_ = result;
+ stub.mode_ = mode;
+ stub.GetCode(isolate);
+}
+
+
+void BinaryOpStub::GenerateAheadOfTime(Isolate* isolate) {
+ Token::Value binop[] = {Token::SUB, Token::MOD, Token::DIV, Token::MUL,
+ Token::ADD, Token::SAR, Token::BIT_OR, Token::BIT_AND,
+ Token::BIT_XOR, Token::SHL, Token::SHR};
+ for (unsigned i = 0; i < ARRAY_SIZE(binop); i++) {
+ BinaryOpStub stub(UNINITIALIZED);
+ stub.op_ = binop[i];
+ stub.GetCode(isolate);
}
+
+ // TODO(olivf) We should investigate why adding stubs to the snapshot is so
+  // expensive at runtime. When solved, we should be able to add most binops to
+ // the snapshot instead of hand-picking them.
+ // Generated list of commonly used stubs
+ Generate(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MOD, SMI, 2, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 32, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 4, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MOD, SMI, 8, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
}
-#define __ ACCESS_MASM(masm)
+bool BinaryOpStub::can_encode_arg_value(int32_t value) const {
+ return op_ == Token::MOD && value > 0 && IsPowerOf2(value) &&
+ FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
+}
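
The predicate above gates the MOD fast path: only positive power-of-two right operands whose log2 fits the 4-bit value field qualify. A minimal standalone sketch of the round trip (the helpers are invented for illustration; only the 4-bit width is taken from the FixedRightArgValueBits declaration later in this diff):

    #include <cassert>
    #include <cstdint>

    static bool is_power_of_2(int32_t v) { return v > 0 && (v & (v - 1)) == 0; }

    static int which_power_of_2(int32_t v) {  // v must be a power of two
      int p = 0;
      while (v > 1) { v >>= 1; ++p; }
      return p;
    }

    int main() {
      const int kValueBits = 4;  // matches BitField<int, 5, 4> below
      for (int32_t v = 1; v <= (1 << 20); v <<= 1) {
        assert(is_power_of_2(v));
        int encoded = which_power_of_2(v);        // encode_arg_value()
        if (encoded >= (1 << kValueBits)) break;  // can_encode_arg_value() fails
        assert((1 << encoded) == v);              // decode_arg_value() round-trips
      }
    }
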
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, CALL_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, CALL_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, CALL_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, CALL_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, CALL_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, CALL_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, CALL_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, CALL_FUNCTION);
+int BinaryOpStub::encode_arg_value(int32_t value) const {
+ ASSERT(can_encode_arg_value(value));
+ return WhichPowerOf2(value);
+}
+
+
+int32_t BinaryOpStub::decode_arg_value(int value) const {
+ return 1 << value;
+}
+
+
+int BinaryOpStub::encode_token(Token::Value op) const {
+ ASSERT(op >= FIRST_TOKEN && op <= LAST_TOKEN);
+ return op - FIRST_TOKEN;
+}
+
+
+Token::Value BinaryOpStub::decode_token(int op) const {
+ int res = op + FIRST_TOKEN;
+ ASSERT(res >= FIRST_TOKEN && res <= LAST_TOKEN);
+ return static_cast<Token::Value>(res);
+}
+
+
+const char* BinaryOpStub::StateToName(State state) {
+ switch (state) {
+ case NONE:
+ return "None";
+ case SMI:
+ return "Smi";
+ case INT32:
+ return "Int32";
+ case NUMBER:
+ return "Number";
+ case STRING:
+ return "String";
+ case GENERIC:
+ return "Generic";
+ }
+ return "";
+}
+
+
+void BinaryOpStub::UpdateStatus(Handle<Object> left,
+ Handle<Object> right,
+ Maybe<Handle<Object> > result) {
+ int old_state = GetExtraICState();
+
+ UpdateStatus(left, &left_state_);
+ UpdateStatus(right, &right_state_);
+
+ int32_t value;
+ bool new_has_fixed_right_arg =
+ right->ToInt32(&value) && can_encode_arg_value(value) &&
+ (left_state_ == SMI || left_state_ == INT32) &&
+ (result_state_ == NONE || !fixed_right_arg_.has_value);
+
+ fixed_right_arg_ = Maybe<int32_t>(new_has_fixed_right_arg, value);
+
+ if (result.has_value) UpdateStatus(result.value, &result_state_);
+
+ State max_input = Max(left_state_, right_state_);
+
+ if (!has_int_result() && op_ != Token::SHR &&
+ max_input <= NUMBER && max_input > result_state_) {
+ result_state_ = max_input;
+ }
+
+ ASSERT(result_state_ <= (has_int_result() ? INT32 : NUMBER) ||
+ op_ == Token::ADD);
+
+ if (old_state == GetExtraICState()) {
+    // Tagged operations can lead to non-truncating HChanges.
+ if (left->IsUndefined() || left->IsBoolean()) {
+ left_state_ = GENERIC;
+ } else if (right->IsUndefined() || right->IsBoolean()) {
+ right_state_ = GENERIC;
+ } else {
+      // Since the FPU is too precise, we might bail out on numbers which
+      // would actually truncate with 64-bit precision.
+ ASSERT(!CpuFeatures::IsSupported(SSE2) &&
+ result_state_ <= INT32);
+ result_state_ = NUMBER;
+ }
+ }
+}
+
+
+void BinaryOpStub::UpdateStatus(Handle<Object> object,
+ State* state) {
+ bool is_truncating = (op_ == Token::BIT_AND || op_ == Token::BIT_OR ||
+ op_ == Token::BIT_XOR || op_ == Token::SAR ||
+ op_ == Token::SHL || op_ == Token::SHR);
+ v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(object);
+ if (object->IsBoolean() && is_truncating) {
+    // Booleans are converted by a truncating HChange.
+ type = TypeInfo::Integer32();
+ }
+ if (object->IsUndefined()) {
+ // Undefined will be automatically truncated for us by HChange.
+ type = is_truncating ? TypeInfo::Integer32() : TypeInfo::Double();
+ }
+ State int_state = SmiValuesAre32Bits() ? NUMBER : INT32;
+ State new_state = NONE;
+ if (type.IsSmi()) {
+ new_state = SMI;
+ } else if (type.IsInteger32()) {
+ new_state = int_state;
+ } else if (type.IsNumber()) {
+ new_state = NUMBER;
+ } else if (object->IsString() && operation() == Token::ADD) {
+ new_state = STRING;
+ } else {
+ new_state = GENERIC;
+ }
+ if ((new_state <= NUMBER && *state > NUMBER) ||
+ (new_state > NUMBER && *state <= NUMBER && *state != NONE)) {
+ new_state = GENERIC;
+ }
+ *state = Max(*state, new_state);
+}
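
The merge rule above treats the states as a lattice (NONE < SMI < INT32 < NUMBER < STRING < GENERIC) in which numeric states widen monotonically but mixing numeric feedback with STRING collapses to GENERIC. A self-contained restatement (plain C++, not V8 code):

    #include <algorithm>
    #include <cassert>

    enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };

    State Merge(State old_state, State new_state) {
      if ((new_state <= NUMBER && old_state > NUMBER) ||
          (new_state > NUMBER && old_state <= NUMBER && old_state != NONE)) {
        new_state = GENERIC;  // numeric and string feedback are incompatible
      }
      return std::max(old_state, new_state);
    }

    int main() {
      assert(Merge(SMI, INT32) == INT32);        // numbers widen monotonically
      assert(Merge(NUMBER, STRING) == GENERIC);  // number + string gives up
      assert(Merge(NONE, STRING) == STRING);     // first observation is kept
    }
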
+
+
+Handle<Type> BinaryOpStub::StateToType(State state,
+ Isolate* isolate) {
+ Handle<Type> t = handle(Type::None(), isolate);
+ switch (state) {
+ case NUMBER:
+ t = handle(Type::Union(t, handle(Type::Double(), isolate)), isolate);
+ // Fall through.
+ case INT32:
+ t = handle(Type::Union(t, handle(Type::Signed32(), isolate)), isolate);
+ // Fall through.
+ case SMI:
+ t = handle(Type::Union(t, handle(Type::Smi(), isolate)), isolate);
break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, CALL_FUNCTION);
+
+ case STRING:
+ t = handle(Type::Union(t, handle(Type::String(), isolate)), isolate);
break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, CALL_FUNCTION);
+ case GENERIC:
+ return handle(Type::Any(), isolate);
break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, CALL_FUNCTION);
+ case NONE:
break;
- default:
- UNREACHABLE();
}
+ return t;
}
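
StateToType builds the result as a union by deliberate fall-through: NUMBER contributes Double, then Signed32, then Smi. The same shape with plain bitmasks (illustrative only; V8's Type objects are handles, not masks):

    #include <cassert>

    enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
    enum TypeBits { kSmi = 1, kSigned32 = 2, kDouble = 4, kString = 8 };

    unsigned StateToTypeBits(State state) {
      unsigned t = 0;
      switch (state) {
        case NUMBER: t |= kDouble;    // fall through: a NUMBER may also be...
        case INT32:  t |= kSigned32;  // ...a signed 32-bit value, which may...
        case SMI:    t |= kSmi;       // ...be a small integer.
          break;
        case STRING: t |= kString; break;
        case GENERIC: return ~0u;     // Any
        case NONE: break;
      }
      return t;
    }

    int main() {
      assert(StateToTypeBits(NUMBER) == (kDouble | kSigned32 | kSmi));
      assert(StateToTypeBits(SMI) == kSmi);
    }
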
-#undef __
+Handle<Type> BinaryOpStub::GetLeftType(Isolate* isolate) const {
+ return StateToType(left_state_, isolate);
+}
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s+%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(left_type_),
- BinaryOpIC::GetName(right_type_));
+Handle<Type> BinaryOpStub::GetRightType(Isolate* isolate) const {
+ return StateToType(right_state_, isolate);
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
- GenerateBothStringStub(masm);
- return;
+Handle<Type> BinaryOpStub::GetResultType(Isolate* isolate) const {
+ if (HasSideEffects(isolate)) return StateToType(NONE, isolate);
+ if (result_state_ == GENERIC && op_ == Token::ADD) {
+ return handle(Type::Union(handle(Type::Number(), isolate),
+ handle(Type::String(), isolate)), isolate);
+ }
+ ASSERT(result_state_ != GENERIC);
+ if (result_state_ == NUMBER && op_ == Token::SHR) {
+ return handle(Type::Unsigned32(), isolate);
}
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
+ return StateToType(result_state_, isolate);
}
@@ -759,6 +1121,12 @@ void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
}
+void NumberToStringStub::InstallDescriptors(Isolate* isolate) {
+ NumberToStringStub stub;
+ InstallDescriptor(isolate, &stub);
+}
+
+
void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
FastNewClosureStub stub(STRICT_MODE, false);
InstallDescriptor(isolate, &stub);
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 946eb76962..80d99d8b68 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -30,8 +30,9 @@
#include "allocation.h"
#include "assembler.h"
-#include "globals.h"
#include "codegen.h"
+#include "globals.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -200,19 +201,21 @@ class CodeStub BASE_EMBEDDED {
virtual void PrintName(StringStream* stream);
+ // Returns a name for logging/debugging purposes.
+ SmartArrayPointer<const char> GetName();
+
protected:
static bool CanUseFPRegisters();
// Generates the assembler code for the stub.
virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
+ virtual void VerifyPlatformFeatures(Isolate* isolate);
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
- // Returns a name for logging/debugging purposes.
- SmartArrayPointer<const char> GetName();
virtual void PrintBaseName(StringStream* stream);
virtual void PrintState(StringStream* stream) { }
@@ -278,7 +281,7 @@ enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
struct CodeStubInterfaceDescriptor {
CodeStubInterfaceDescriptor();
int register_param_count_;
- const Register* stack_parameter_count_;
+ Register stack_parameter_count_;
// if hint_stack_parameter_count_ > 0, the code stub can optimize the
// return sequence. Default value is -1, which means it is ignored.
int hint_stack_parameter_count_;
@@ -287,7 +290,7 @@ struct CodeStubInterfaceDescriptor {
Address deoptimization_handler_;
int environment_length() const {
- if (stack_parameter_count_ != NULL) {
+ if (stack_parameter_count_.is_valid()) {
return register_param_count_ + 1;
}
return register_param_count_;
@@ -318,7 +321,7 @@ struct CodeStubInterfaceDescriptor {
// defined outside of the platform directories
#define DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index) \
((index) == (descriptor)->register_param_count_) \
- ? *((descriptor)->stack_parameter_count_) \
+ ? (descriptor)->stack_parameter_count_ \
: (descriptor)->register_params_[(index)]
@@ -402,9 +405,7 @@ enum StringAddFlags {
// Check right parameter.
STRING_ADD_CHECK_RIGHT = 1 << 1,
// Check both parameters.
- STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
- // Stub needs a frame before calling the runtime
- STRING_ADD_ERECT_FRAME = 1 << 2
+ STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
};
} } // namespace v8::internal
@@ -464,6 +465,27 @@ class ToNumberStub: public HydrogenCodeStub {
};
+class NumberToStringStub V8_FINAL : public HydrogenCodeStub {
+ public:
+ NumberToStringStub() {}
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kNumber = 0;
+
+ private:
+ virtual Major MajorKey() V8_OVERRIDE { return NumberToString; }
+ virtual int NotMissMinorKey() V8_OVERRIDE { return 0; }
+};
+
+
class FastNewClosureStub : public HydrogenCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
@@ -830,19 +852,12 @@ class FunctionPrototypeStub: public ICStub {
class StringLengthStub: public ICStub {
public:
- StringLengthStub(Code::Kind kind, bool support_wrapper)
- : ICStub(kind), support_wrapper_(support_wrapper) { }
+ explicit StringLengthStub(Code::Kind kind) : ICStub(kind) { }
virtual void Generate(MacroAssembler* masm);
private:
STATIC_ASSERT(KindBits::kSize == 4);
- class WrapperModeBits: public BitField<bool, 4, 1> {};
- virtual CodeStub::Major MajorKey() { return StringLength; }
- virtual int MinorKey() {
- return KindBits::encode(kind()) | WrapperModeBits::encode(support_wrapper_);
- }
-
- bool support_wrapper_;
+ virtual CodeStub::Major MajorKey() { return StringLength; }
};
@@ -892,7 +907,7 @@ class HICStub: public HydrogenCodeStub {
class HandlerStub: public HICStub {
public:
- virtual Code::Kind GetCodeKind() const { return Code::STUB; }
+ virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
virtual int GetStubFlags() { return kind(); }
protected:
@@ -983,156 +998,177 @@ class KeyedLoadFieldStub: public LoadFieldStub {
};
-class BinaryOpStub: public PlatformCodeStub {
+class BinaryOpStub: public HydrogenCodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- platform_specific_bit_(false),
- left_type_(BinaryOpIC::UNINITIALIZED),
- right_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED),
- encoded_right_arg_(false, encode_arg_value(1)) {
+ : HydrogenCodeStub(UNINITIALIZED), op_(op), mode_(mode) {
+ ASSERT(op <= LAST_TOKEN && op >= FIRST_TOKEN);
Initialize();
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- BinaryOpIC::TypeInfo result_type,
- Maybe<int32_t> fixed_right_arg)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- platform_specific_bit_(PlatformSpecificBits::decode(key)),
- left_type_(left_type),
- right_type_(right_type),
- result_type_(result_type),
- encoded_right_arg_(fixed_right_arg.has_value,
- encode_arg_value(fixed_right_arg.value)) { }
+ explicit BinaryOpStub(Code::ExtraICState state)
+ : op_(decode_token(OpBits::decode(state))),
+ mode_(OverwriteModeField::decode(state)),
+ fixed_right_arg_(
+ Maybe<int>(HasFixedRightArgBits::decode(state),
+ decode_arg_value(FixedRightArgValueBits::decode(state)))),
+ left_state_(LeftStateField::decode(state)),
+ right_state_(fixed_right_arg_.has_value
+ ? ((fixed_right_arg_.value <= Smi::kMaxValue) ? SMI : INT32)
+ : RightStateField::decode(state)),
+ result_state_(ResultStateField::decode(state)) {
+    // We don't deserialize the SSE2 field, since it is only used so that both
+    // SSE2 and non-SSE2 versions can be included in the snapshot. For code
+    // generation we always want it to reflect the current state.
+ ASSERT(!fixed_right_arg_.has_value ||
+ can_encode_arg_value(fixed_right_arg_.value));
+ }
+
+ static const int FIRST_TOKEN = Token::BIT_OR;
+ static const int LAST_TOKEN = Token::MOD;
- static void decode_types_from_minor_key(int minor_key,
- BinaryOpIC::TypeInfo* left_type,
- BinaryOpIC::TypeInfo* right_type,
- BinaryOpIC::TypeInfo* result_type) {
- *left_type =
- static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
- *right_type =
- static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
- *result_type =
- static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
+ static void GenerateAheadOfTime(Isolate* isolate);
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);
+ static void InitializeForIsolate(Isolate* isolate) {
+ BinaryOpStub binopStub(UNINITIALIZED);
+ binopStub.InitializeInterfaceDescriptor(
+ isolate, isolate->code_stub_interface_descriptor(CodeStub::BinaryOp));
+ }
+
+ virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
+ virtual InlineCacheState GetICState() {
+ if (Max(left_state_, right_state_) == NONE) {
+ return ::v8::internal::UNINITIALIZED;
+ }
+ if (Max(left_state_, right_state_) == GENERIC) return MEGAMORPHIC;
+ return MONOMORPHIC;
}
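
GetICState above folds the pair of operand states into the IC protocol: no feedback yet is UNINITIALIZED, saturated feedback is MEGAMORPHIC, anything in between is MONOMORPHIC. The same mapping as a free function (names invented for illustration):

    #include <cassert>

    enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
    enum InlineCacheState { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };

    InlineCacheState ICStateFor(State left, State right) {
      State max = left > right ? left : right;
      if (max == NONE) return UNINITIALIZED;   // nothing observed yet
      if (max == GENERIC) return MEGAMORPHIC;  // type feedback saturated
      return MONOMORPHIC;                      // some concrete type recorded
    }

    int main() {
      assert(ICStateFor(NONE, NONE) == UNINITIALIZED);
      assert(ICStateFor(SMI, GENERIC) == MEGAMORPHIC);
      assert(ICStateFor(SMI, NUMBER) == MONOMORPHIC);
    }
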
- static Token::Value decode_op_from_minor_key(int minor_key) {
- return static_cast<Token::Value>(OpBits::decode(minor_key));
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
}
- static Maybe<int> decode_fixed_right_arg_from_minor_key(int minor_key) {
- return Maybe<int>(
- HasFixedRightArgBits::decode(minor_key),
- decode_arg_value(FixedRightArgValueBits::decode(minor_key)));
+ virtual Code::ExtraICState GetExtraICState() {
+ bool sse_field = Max(result_state_, Max(left_state_, right_state_)) > SMI &&
+ CpuFeatures::IsSafeForSnapshot(SSE2);
+
+ return OpBits::encode(encode_token(op_))
+ | LeftStateField::encode(left_state_)
+ | RightStateField::encode(fixed_right_arg_.has_value
+ ? NONE : right_state_)
+ | ResultStateField::encode(result_state_)
+ | HasFixedRightArgBits::encode(fixed_right_arg_.has_value)
+ | FixedRightArgValueBits::encode(fixed_right_arg_.has_value
+ ? encode_arg_value(
+ fixed_right_arg_.value)
+ : 0)
+ | SSE2Field::encode(sse_field)
+ | OverwriteModeField::encode(mode_);
}
- int fixed_right_arg_value() const {
- return decode_arg_value(encoded_right_arg_.value);
+ bool CanReuseDoubleBox() {
+ return result_state_ <= NUMBER && result_state_ > SMI &&
+ ((left_state_ > SMI && left_state_ <= NUMBER &&
+ mode_ == OVERWRITE_LEFT) ||
+ (right_state_ > SMI && right_state_ <= NUMBER &&
+ mode_ == OVERWRITE_RIGHT));
}
- static bool can_encode_arg_value(int32_t value) {
- return value > 0 &&
- IsPowerOf2(value) &&
- FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
+ bool HasSideEffects(Isolate* isolate) const {
+ Handle<Type> left = GetLeftType(isolate);
+ Handle<Type> right = GetRightType(isolate);
+ return left->Maybe(Type::Receiver()) || right->Maybe(Type::Receiver());
}
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
- private:
- Token::Value op_;
- OverwriteMode mode_;
- bool platform_specific_bit_; // Indicates SSE3 on IA32.
+ Maybe<Handle<Object> > Result(Handle<Object> left,
+ Handle<Object> right,
+ Isolate* isolate);
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo left_type_;
- BinaryOpIC::TypeInfo right_type_;
- BinaryOpIC::TypeInfo result_type_;
+ Token::Value operation() const { return op_; }
+ OverwriteMode mode() const { return mode_; }
+ Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
- Maybe<int> encoded_right_arg_;
+ Handle<Type> GetLeftType(Isolate* isolate) const;
+ Handle<Type> GetRightType(Isolate* isolate) const;
+ Handle<Type> GetResultType(Isolate* isolate) const;
- static int encode_arg_value(int32_t value) {
- ASSERT(can_encode_arg_value(value));
- return WhichPowerOf2(value);
- }
+ void UpdateStatus(Handle<Object> left,
+ Handle<Object> right,
+ Maybe<Handle<Object> > result);
- static int32_t decode_arg_value(int value) {
- return 1 << value;
+ void PrintState(StringStream* stream);
+
+ private:
+ explicit BinaryOpStub(InitializationState state) : HydrogenCodeStub(state),
+ op_(Token::ADD),
+ mode_(NO_OVERWRITE) {
+ Initialize();
}
+ void Initialize();
- virtual void PrintName(StringStream* stream);
+ enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
+
+  // The token range must fit into the 5-bit OpBits field below.
+ STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 5));
+ class LeftStateField: public BitField<State, 0, 3> {};
+ // When fixed right arg is set, we don't need to store the right state.
+ // Thus the two fields can overlap.
+ class HasFixedRightArgBits: public BitField<bool, 4, 1> {};
+ class FixedRightArgValueBits: public BitField<int, 5, 4> {};
+ class RightStateField: public BitField<State, 5, 3> {};
+ class ResultStateField: public BitField<State, 9, 3> {};
+ class SSE2Field: public BitField<bool, 12, 1> {};
+ class OverwriteModeField: public BitField<OverwriteMode, 13, 2> {};
+ class OpBits: public BitField<int, 15, 5> {};
+
+ virtual CodeStub::Major MajorKey() { return BinaryOp; }
+ virtual int NotMissMinorKey() { return GetExtraICState(); }
- // Minor key encoding in all 25 bits FFFFFHTTTRRRLLLPOOOOOOOMM.
- // Note: We actually do not need 7 bits for the operation, just 4 bits to
- // encode ADD, SUB, MUL, DIV, MOD, BIT_OR, BIT_AND, BIT_XOR, SAR, SHL, SHR.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class PlatformSpecificBits: public BitField<bool, 9, 1> {};
- class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
- class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
- class HasFixedRightArgBits: public BitField<bool, 19, 1> {};
- class FixedRightArgValueBits: public BitField<int, 20, 5> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | PlatformSpecificBits::encode(platform_specific_bit_)
- | LeftTypeBits::encode(left_type_)
- | RightTypeBits::encode(right_type_)
- | ResultTypeBits::encode(result_type_)
- | HasFixedRightArgBits::encode(encoded_right_arg_.has_value)
- | FixedRightArgValueBits::encode(encoded_right_arg_.value);
- }
+ static Handle<Type> StateToType(State state,
+ Isolate* isolate);
+ static void Generate(Token::Value op,
+ State left,
+ int right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate);
- // Platform-independent implementation.
- void Generate(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
+ static void Generate(Token::Value op,
+ State left,
+ State right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate);
- // Platform-independent signature, platform-specific implementation.
- void Initialize();
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
-
- // Entirely platform-specific methods are defined as static helper
- // functions in the <arch>/code-stubs-<arch>.cc files.
+ void UpdateStatus(Handle<Object> object,
+ State* state);
- virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
+ bool can_encode_arg_value(int32_t value) const;
+ int encode_arg_value(int32_t value) const;
+ int32_t decode_arg_value(int value) const;
+ int encode_token(Token::Value op) const;
+ Token::Value decode_token(int op) const;
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(Max(left_type_, right_type_));
+ bool has_int_result() const {
+ return op_ == Token::BIT_XOR || op_ == Token::BIT_AND ||
+ op_ == Token::BIT_OR || op_ == Token::SAR || op_ == Token::SHL;
}
- virtual void FinishCode(Handle<Code> code) {
- code->set_stub_info(MinorKey());
- }
+ const char* StateToName(State state);
+
+ void PrintBaseName(StringStream* stream);
- friend class CodeGenerator;
+ Token::Value op_;
+ OverwriteMode mode_;
+
+ Maybe<int> fixed_right_arg_;
+ State left_state_;
+ State right_state_;
+ State result_state_;
};
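
The BitField declarations above pack the extra IC state into 20 bits, and RightStateField (bits 5-7) deliberately aliases FixedRightArgValueBits (bits 5-8): when a fixed right argument is present the right state is implied, so the same bits hold the encoded argument instead. A toy BitField (a stand-in, not V8's template) makes the overlap visible:

    #include <cassert>

    template <typename T, int shift, int size>
    struct BitField {
      static const unsigned kMask = ((1u << size) - 1u) << shift;
      static unsigned encode(T v) { return static_cast<unsigned>(v) << shift; }
      static T decode(unsigned v) { return static_cast<T>((v & kMask) >> shift); }
    };

    int main() {
      typedef BitField<int, 5, 4> FixedRightArgValueBits;
      typedef BitField<int, 5, 3> RightStateField;
      // The two fields share bits 5..7 on purpose.
      assert((FixedRightArgValueBits::kMask & RightStateField::kMask) != 0);
      // Encode/decode round-trips within a field.
      unsigned bits = FixedRightArgValueBits::encode(9);
      assert(FixedRightArgValueBits::decode(bits) == 9);
    }
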
@@ -1318,6 +1354,11 @@ class CEntryStub : public PlatformCodeStub {
virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateAheadOfTime(Isolate* isolate);
+ protected:
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
+ };
+
private:
void GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
@@ -1705,7 +1746,9 @@ class DoubleToIStub : public PlatformCodeStub {
DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
- SkipFastPathBits::encode(skip_fastpath);
+ SkipFastPathBits::encode(skip_fastpath) |
+ SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
+ CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
}
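
The nested conditional above packs the detected SSE level into the new two-bit SSEBits field: 0 for no SSE2, 1 for SSE2 only, 2 for SSE3 (which implies SSE2). Restated with invented names:

    #include <cassert>

    enum SSELevel { kNoSSE2 = 0, kSSE2 = 1, kSSE3 = 2 };

    SSELevel EncodeSSELevel(bool sse2_safe, bool sse3_safe) {
      return sse2_safe ? (sse3_safe ? kSSE3 : kSSE2) : kNoSSE2;
    }

    int main() {
      assert(EncodeSSELevel(true, true) == kSSE3);
      assert(EncodeSSELevel(true, false) == kSSE2);
      assert(EncodeSSELevel(false, true) == kNoSSE2);  // SSE3 without SSE2: none
    }
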
Register source() {
@@ -1734,6 +1777,11 @@ class DoubleToIStub : public PlatformCodeStub {
virtual bool SometimesSetsUpAFrame() { return false; }
+ protected:
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
+ }
+
private:
static const int kBitsPerRegisterNumber = 6;
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
@@ -1748,6 +1796,8 @@ class DoubleToIStub : public PlatformCodeStub {
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
class SkipFastPathBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
+ class SSEBits:
+ public BitField<int, 2 * kBitsPerRegisterNumber + 5, 2> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index d33c7f06bd..573ddc6ce7 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -113,10 +113,12 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
masm->GetCode(&desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
- false, is_crankshafted);
+ false, is_crankshafted,
+ info->prologue_offset());
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
- code->set_prologue_offset(info->prologue_offset());
+ isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted,
+ code->instruction_size());
return code;
}
@@ -132,7 +134,9 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
- if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION ||
+ code->kind() == Code::FUNCTION;
+ if (print_source) {
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
PrintF("--- Raw source ---\n");
@@ -160,12 +164,16 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
} else {
PrintF("--- Code ---\n");
}
+ if (print_source) {
+ PrintF("source_position = %d\n", function->start_position());
+ }
if (info->IsStub()) {
CodeStub::Major major_key = info->code_stub()->MajorKey();
code->Disassemble(CodeStub::MajorName(major_key, false));
} else {
code->Disassemble(*function->debug_name()->ToCString());
}
+ PrintF("--- End code ---\n");
}
#endif // ENABLE_DISASSEMBLER
}
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 47634ec22b..ed0a0c8e69 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -112,7 +112,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
zone_ = zone;
deferred_handles_ = NULL;
code_stub_ = NULL;
- prologue_offset_ = kPrologueOffsetNotSet;
+ prologue_offset_ = Code::kPrologueOffsetNotSet;
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2) : NULL;
@@ -123,7 +123,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
mode_ = STUB;
return;
}
- mode_ = isolate->use_crankshaft() ? mode : NONOPT;
+ mode_ = mode;
abort_due_to_dependency_ = false;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
@@ -260,7 +260,7 @@ static bool AlwaysFullCompiler(Isolate* isolate) {
}
-void OptimizingCompiler::RecordOptimizationStats() {
+void RecompileJob::RecordOptimizationStats() {
Handle<JSFunction> function = info()->closure();
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
@@ -297,23 +297,60 @@ void OptimizingCompiler::RecordOptimizationStats() {
// A return value of true indicates the compilation pipeline is still
// going, not necessarily that we optimized the code.
static bool MakeCrankshaftCode(CompilationInfo* info) {
- OptimizingCompiler compiler(info);
- OptimizingCompiler::Status status = compiler.CreateGraph();
+ RecompileJob job(info);
+ RecompileJob::Status status = job.CreateGraph();
- if (status != OptimizingCompiler::SUCCEEDED) {
- return status != OptimizingCompiler::FAILED;
+ if (status != RecompileJob::SUCCEEDED) {
+ return status != RecompileJob::FAILED;
}
- status = compiler.OptimizeGraph();
- if (status != OptimizingCompiler::SUCCEEDED) {
- status = compiler.AbortOptimization();
- return status != OptimizingCompiler::FAILED;
+ status = job.OptimizeGraph();
+ if (status != RecompileJob::SUCCEEDED) {
+ status = job.AbortOptimization();
+ return status != RecompileJob::FAILED;
}
- status = compiler.GenerateAndInstallCode();
- return status != OptimizingCompiler::FAILED;
+ status = job.GenerateAndInstallCode();
+ return status != RecompileJob::FAILED;
}
-OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
+class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
+ public:
+  explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
+ : HOptimizedGraphBuilder(info) {
+ }
+
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node) V8_OVERRIDE { \
+ if (node->position() != RelocInfo::kNoPosition) { \
+ SetSourcePosition(node->position()); \
+ } \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ }
+ EXPRESSION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node) V8_OVERRIDE { \
+ if (node->position() != RelocInfo::kNoPosition) { \
+ SetSourcePosition(node->position()); \
+ } \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ }
+ STATEMENT_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node) V8_OVERRIDE { \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ }
+ MODULE_NODE_LIST(DEF_VISIT)
+ DECLARATION_NODE_LIST(DEF_VISIT)
+ AUXILIARY_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+};
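
The class above uses V8's node-list macros to stamp out one override per AST node type, each recording the node's source position before delegating to the base visitor. The same pattern with a toy two-node hierarchy (self-contained, not V8 code):

    #include <cstdio>

    struct Add  { int position() const { return 7; } };
    struct Call { int position() const { return -1; } };  // kNoPosition stand-in

    struct Builder {
      virtual ~Builder() {}
      virtual void VisitAdd(Add*)   { std::printf("visit Add\n"); }
      virtual void VisitCall(Call*) { std::printf("visit Call\n"); }
    };

    struct BuilderWithPositions : Builder {
    #define DEF_VISIT(type)                                       \
      virtual void Visit##type(type* node) {                      \
        if (node->position() != -1) {                             \
          std::printf("source position %d\n", node->position());  \
        }                                                         \
        Builder::Visit##type(node);                               \
      }
      DEF_VISIT(Add)
      DEF_VISIT(Call)
    #undef DEF_VISIT
    };

    int main() {
      Add a; Call c;
      BuilderWithPositions b;
      b.VisitAdd(&a);   // records position 7, then delegates
      b.VisitCall(&c);  // no position recorded, just delegates
    }
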
+
+
+RecompileJob::Status RecompileJob::CreateGraph() {
ASSERT(isolate()->use_crankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
@@ -419,7 +456,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// Type-check the function.
AstTyper::Run(info());
- graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info());
+ graph_builder_ = FLAG_emit_opt_code_positions
+      ? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
+ : new(info()->zone()) HOptimizedGraphBuilder(info());
Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
@@ -452,7 +491,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
-OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
+RecompileJob::Status RecompileJob::OptimizeGraph() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
@@ -475,7 +514,7 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
}
-OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
+RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
ASSERT(!info()->HasAbortedDueToDependencyChange());
DisallowCodeDependencyChange no_dependency_change;
@@ -555,6 +594,33 @@ static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
}
+// Sets the expected number of properties from the compiler's estimate.
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
+ int estimate) {
+ // See the comment in SetExpectedNofProperties.
+ if (shared->live_objects_may_exist()) return;
+
+ // If no properties are added in the constructor, they are more likely
+ // to be added later.
+ if (estimate == 0) estimate = 2;
+
+ // TODO(yangguo): check whether those heuristics are still up-to-date.
+ // We do not shrink objects that go into a snapshot (yet), so we adjust
+ // the estimate conservatively.
+ if (Serializer::enabled()) {
+ estimate += 2;
+ } else if (FLAG_clever_optimizations) {
+ // Inobject slack tracking will reclaim redundant inobject space later,
+ // so we can afford to adjust the estimate generously.
+ estimate += 8;
+ } else {
+ estimate += 3;
+ }
+
+ shared->set_expected_nof_properties(estimate);
+}
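
The heuristic above: a constructor that adds no properties probably gains a couple later (floor of 2), snapshot builds get a small conservative pad (+2), and builds with in-object slack tracking can over-reserve generously (+8) because unused space is reclaimed later; otherwise +3. As plain arithmetic (booleans replace the serializer/flag checks):

    #include <cassert>

    int AdjustedEstimate(int estimate, bool snapshotting, bool slack_tracking) {
      if (estimate == 0) estimate = 2;  // expect a few late-added properties
      if (snapshotting) return estimate + 2;    // conservative: no shrinking yet
      if (slack_tracking) return estimate + 8;  // excess reclaimed later
      return estimate + 3;
    }

    int main() {
      assert(AdjustedEstimate(0, true, false) == 4);   // 0 -> 2, then +2
      assert(AdjustedEstimate(5, false, true) == 13);  // 5 + 8
    }
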
+
+
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
PostponeInterruptsScope postpone(isolate);
@@ -599,66 +665,70 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
}
}
- // Measure how long it takes to do the compilation; only take the
- // rest of the function into account to avoid overlap with the
- // parsing statistics.
- HistogramTimer* rate = info->is_eval()
- ? info->isolate()->counters()->compile_eval()
- : info->isolate()->counters()->compile();
- HistogramTimerScope timer(rate);
-
- // Compile the code.
FunctionLiteral* lit = info->function();
LiveEditFunctionTracker live_edit_tracker(isolate, lit);
- if (!MakeCode(info)) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- return Handle<SharedFunctionInfo>::null();
- }
+ Handle<SharedFunctionInfo> result;
+ {
+ // Measure how long it takes to do the compilation; only take the
+ // rest of the function into account to avoid overlap with the
+ // parsing statistics.
+ HistogramTimer* rate = info->is_eval()
+ ? info->isolate()->counters()->compile_eval()
+ : info->isolate()->counters()->compile();
+ HistogramTimerScope timer(rate);
- // Allocate function.
- ASSERT(!info->code().is_null());
- Handle<SharedFunctionInfo> result =
- isolate->factory()->NewSharedFunctionInfo(
- lit->name(),
- lit->materialized_literal_count(),
- lit->is_generator(),
- info->code(),
- ScopeInfo::Create(info->scope(), info->zone()));
-
- ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- Compiler::SetFunctionInfo(result, lit, true, script);
-
- if (script->name()->IsString()) {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- info,
- String::cast(script->name())));
- GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
- script,
- info->code(),
- info));
- } else {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- info,
- isolate->heap()->empty_string()));
- GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
- }
+ // Compile the code.
+ if (!MakeCode(info)) {
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return Handle<SharedFunctionInfo>::null();
+ }
+
+ // Allocate function.
+ ASSERT(!info->code().is_null());
+ result =
+ isolate->factory()->NewSharedFunctionInfo(
+ lit->name(),
+ lit->materialized_literal_count(),
+ lit->is_generator(),
+ info->code(),
+ ScopeInfo::Create(info->scope(), info->zone()));
+
+ ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
+ Compiler::SetFunctionInfo(result, lit, true, script);
+
+ if (script->name()->IsString()) {
+ PROFILE(isolate, CodeCreateEvent(
+ info->is_eval()
+ ? Logger::EVAL_TAG
+ : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+ *info->code(),
+ *result,
+ info,
+ String::cast(script->name())));
+ GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
+ script,
+ info->code(),
+ info));
+ } else {
+ PROFILE(isolate, CodeCreateEvent(
+ info->is_eval()
+ ? Logger::EVAL_TAG
+ : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+ *info->code(),
+ *result,
+ info,
+ isolate->heap()->empty_string()));
+ GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
+ }
- // Hint to the runtime system used when allocating space for initial
- // property space by setting the expected number of properties for
- // the instances of the function.
- SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
+ // Hint to the runtime system used when allocating space for initial
+ // property space by setting the expected number of properties for
+ // the instances of the function.
+ SetExpectedNofPropertiesFromEstimate(result,
+ lit->expected_property_count());
- script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+ script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+ }
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
@@ -1032,16 +1102,15 @@ bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
info->SaveHandles();
if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
- OptimizingCompiler* compiler =
- new(info->zone()) OptimizingCompiler(*info);
- OptimizingCompiler::Status status = compiler->CreateGraph();
- if (status == OptimizingCompiler::SUCCEEDED) {
+ RecompileJob* job = new(info->zone()) RecompileJob(*info);
+ RecompileJob::Status status = job->CreateGraph();
+ if (status == RecompileJob::SUCCEEDED) {
info.Detach();
shared->code()->set_profiler_ticks(0);
- isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
+ isolate->optimizing_compiler_thread()->QueueForOptimization(job);
ASSERT(!isolate->has_pending_exception());
return true;
- } else if (status == OptimizingCompiler::BAILED_OUT) {
+ } else if (status == RecompileJob::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
}
@@ -1054,9 +1123,8 @@ bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
}
-Handle<Code> Compiler::InstallOptimizedCode(
- OptimizingCompiler* optimizing_compiler) {
- SmartPointer<CompilationInfo> info(optimizing_compiler->info());
+Handle<Code> Compiler::InstallOptimizedCode(RecompileJob* job) {
+ SmartPointer<CompilationInfo> info(job->info());
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
@@ -1077,24 +1145,24 @@ Handle<Code> Compiler::InstallOptimizedCode(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
- OptimizingCompiler::Status status = optimizing_compiler->last_status();
+ RecompileJob::Status status = job->last_status();
if (info->HasAbortedDueToDependencyChange()) {
info->set_bailout_reason(kBailedOutDueToDependencyChange);
- status = optimizing_compiler->AbortOptimization();
- } else if (status != OptimizingCompiler::SUCCEEDED) {
+ status = job->AbortOptimization();
+ } else if (status != RecompileJob::SUCCEEDED) {
info->set_bailout_reason(kFailedBailedOutLastTime);
- status = optimizing_compiler->AbortOptimization();
+ status = job->AbortOptimization();
} else if (isolate->DebuggerHasBreakPoints()) {
info->set_bailout_reason(kDebuggerIsActive);
- status = optimizing_compiler->AbortOptimization();
+ status = job->AbortOptimization();
} else {
- status = optimizing_compiler->GenerateAndInstallCode();
- ASSERT(status == OptimizingCompiler::SUCCEEDED ||
- status == OptimizingCompiler::BAILED_OUT);
+ status = job->GenerateAndInstallCode();
+ ASSERT(status == RecompileJob::SUCCEEDED ||
+ status == RecompileJob::BAILED_OUT);
}
InstallCodeCommon(*info);
- if (status == OptimizingCompiler::SUCCEEDED) {
+ if (status == RecompileJob::SUCCEEDED) {
Handle<Code> code = info->code();
ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
info->closure()->ReplaceCode(*code);
@@ -1115,8 +1183,8 @@ Handle<Code> Compiler::InstallOptimizedCode(
// profiler ticks to prevent too soon re-opt after a deopt.
info->shared_info()->code()->set_profiler_ticks(0);
ASSERT(!info->closure()->IsInRecompileQueue());
- return (status == OptimizingCompiler::SUCCEEDED) ? info->code()
- : Handle<Code>::null();
+ return (status == RecompileJob::SUCCEEDED) ? info->code()
+ : Handle<Code>::null();
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 8ceb61db9c..2d9e52a8e3 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -35,8 +35,6 @@
namespace v8 {
namespace internal {
-static const int kPrologueOffsetNotSet = -1;
-
class ScriptDataImpl;
class HydrogenCodeStub;
@@ -86,6 +84,7 @@ class CompilationInfo {
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
+ uint32_t osr_pc_offset() const { return osr_pc_offset_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_heap_slots() const;
@@ -268,12 +267,12 @@ class CompilationInfo {
void set_bailout_reason(BailoutReason reason) { bailout_reason_ = reason; }
int prologue_offset() const {
- ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
+ ASSERT_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
return prologue_offset_;
}
void set_prologue_offset(int prologue_offset) {
- ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_);
+ ASSERT_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
prologue_offset_ = prologue_offset;
}
@@ -505,14 +504,15 @@ class LChunk;
// fail, bail-out to the full code generator or succeed. Apart from
// their return value, the status of the phase last run can be checked
// using last_status().
-class OptimizingCompiler: public ZoneObject {
+class RecompileJob: public ZoneObject {
public:
- explicit OptimizingCompiler(CompilationInfo* info)
+ explicit RecompileJob(CompilationInfo* info)
: info_(info),
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
- last_status_(FAILED) { }
+ last_status_(FAILED),
+ awaiting_install_(false) { }
enum Status {
FAILED, BAILED_OUT, SUCCEEDED
@@ -532,6 +532,13 @@ class OptimizingCompiler: public ZoneObject {
return SetLastStatus(BAILED_OUT);
}
+ void WaitForInstall() {
+ ASSERT(info_->is_osr());
+ awaiting_install_ = true;
+ }
+
+ bool IsWaitingForInstall() { return awaiting_install_; }
+
private:
CompilationInfo* info_;
HOptimizedGraphBuilder* graph_builder_;
@@ -541,6 +548,7 @@ class OptimizingCompiler: public ZoneObject {
TimeDelta time_taken_to_optimize_;
TimeDelta time_taken_to_codegen_;
Status last_status_;
+ bool awaiting_install_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
last_status_ = status;
@@ -549,9 +557,8 @@ class OptimizingCompiler: public ZoneObject {
void RecordOptimizationStats();
struct Timer {
- Timer(OptimizingCompiler* compiler, TimeDelta* location)
- : compiler_(compiler),
- location_(location) {
+ Timer(RecompileJob* job, TimeDelta* location)
+ : job_(job), location_(location) {
ASSERT(location_ != NULL);
timer_.Start();
}
@@ -560,7 +567,7 @@ class OptimizingCompiler: public ZoneObject {
*location_ += timer_.Elapsed();
}
- OptimizingCompiler* compiler_;
+ RecompileJob* job_;
ElapsedTimer timer_;
TimeDelta* location_;
};
@@ -625,7 +632,7 @@ class Compiler : public AllStatic {
bool is_toplevel,
Handle<Script> script);
- static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info);
+ static Handle<Code> InstallOptimizedCode(RecompileJob* job);
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 441ef9d9c3..710d30aa8e 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -259,7 +259,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsNativeContext());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 2f0a399d1a..7ba19ba0f1 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -355,7 +355,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
return JunkStringValue();
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
Vector<const char> buffer_vector(buffer, buffer_pos);
return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
@@ -692,7 +692,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
exponent--;
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index cdc42e34d9..5f1219eea9 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -31,6 +31,7 @@
#include "conversions-inl.h"
#include "dtoa.h"
+#include "list-inl.h"
#include "strtod.h"
#include "utils.h"
@@ -45,8 +46,11 @@ namespace internal {
double StringToDouble(UnicodeCache* unicode_cache,
const char* str, int flags, double empty_string_val) {
- const char* end = str + StrLength(str);
- return InternalStringToDouble(unicode_cache, str, end, flags,
+ // We cast to const uint8_t* here to avoid instantiating the
+ // InternalStringToDouble() template for const char* as well.
+ const uint8_t* start = reinterpret_cast<const uint8_t*>(str);
+ const uint8_t* end = start + StrLength(str);
+ return InternalStringToDouble(unicode_cache, start, end, flags,
empty_string_val);
}
@@ -55,11 +59,15 @@ double StringToDouble(UnicodeCache* unicode_cache,
Vector<const char> str,
int flags,
double empty_string_val) {
- const char* end = str.start() + str.length();
- return InternalStringToDouble(unicode_cache, str.start(), end, flags,
+ // We cast to const uint8_t* here to avoid instantiating the
+ // InternalStringToDouble() template for const char* as well.
+ const uint8_t* start = reinterpret_cast<const uint8_t*>(str.start());
+ const uint8_t* end = start + str.length();
+ return InternalStringToDouble(unicode_cache, start, end, flags,
empty_string_val);
}
+
double StringToDouble(UnicodeCache* unicode_cache,
Vector<const uc16> str,
int flags,
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 93911d7216..821c25f8ce 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -259,22 +259,51 @@ class HistogramTimer : public Histogram {
return Enabled() && timer_.IsStarted();
}
+ // TODO(bmeurer): Remove this when HistogramTimerScope is fixed.
+#ifdef DEBUG
+ ElapsedTimer* timer() { return &timer_; }
+#endif
+
private:
ElapsedTimer timer_;
};
// Helper class for scoping a HistogramTimer.
+// TODO(bmeurer): The ifdeffery is an ugly hack around the fact that the
+// Parser is currently reentrant (when it throws an error, we call back
+// into JavaScript and all bets are off), but ElapsedTimer is not
+// reentry-safe. Fix this properly and remove |allow_nesting|.
class HistogramTimerScope BASE_EMBEDDED {
public:
- explicit HistogramTimerScope(HistogramTimer* timer) :
- timer_(timer) {
+ explicit HistogramTimerScope(HistogramTimer* timer,
+ bool allow_nesting = false)
+#ifdef DEBUG
+ : timer_(timer),
+ skipped_timer_start_(false) {
+ if (timer_->timer()->IsStarted() && allow_nesting) {
+ skipped_timer_start_ = true;
+ } else {
+ timer_->Start();
+ }
+#else
+ : timer_(timer) {
timer_->Start();
+#endif
}
~HistogramTimerScope() {
+#ifdef DEBUG
+ if (!skipped_timer_start_) {
+ timer_->Stop();
+ }
+#else
timer_->Stop();
+#endif
}
private:
HistogramTimer* timer_;
+#ifdef DEBUG
+ bool skipped_timer_start_;
+#endif
};
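
A toy replica of the DEBUG-build behaviour added above: a nested scope on the same timer, with |allow_nesting| set, skips its Start()/Stop() pair instead of tripping over the already-running timer (C++11 sketch, not the V8 classes themselves):

    #include <cassert>
    #include <cstdio>

    struct Timer {
      bool started = false;
      void Start() { assert(!started); started = true; }
      void Stop()  { assert(started);  started = false; }
    };

    struct TimerScope {
      TimerScope(Timer* t, bool allow_nesting = false) : timer_(t) {
        if (timer_->started && allow_nesting) {
          skipped_ = true;  // reentrant use: leave the outer timing alone
        } else {
          timer_->Start();
        }
      }
      ~TimerScope() { if (!skipped_) timer_->Stop(); }
      Timer* timer_;
      bool skipped_ = false;
    };

    int main() {
      Timer t;
      TimerScope outer(&t);
      { TimerScope inner(&t, true); }  // would assert without the guard
      std::printf("timer still running: %d\n", t.started);
    }
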
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index e0f7aea18a..b1af621ccc 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -64,14 +64,15 @@ void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
TickSampleEventRecord record(last_code_event_id_);
- TickSample* sample = &record.sample;
- sample->state = isolate->current_vm_state();
- sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
- for (StackTraceFrameIterator it(isolate);
- !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
- it.Advance()) {
- sample->stack[sample->frames_count++] = it.frame()->pc();
+ RegisterState regs;
+ StackFrameIterator it(isolate);
+ if (!it.done()) {
+ StackFrame* frame = it.frame();
+ regs.sp = frame->sp();
+ regs.fp = frame->fp();
+ regs.pc = frame->pc();
}
+ record.sample.Init(isolate, regs);
ticks_from_vm_buffer_.Enqueue(record);
}
@@ -260,7 +261,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- Name* source, int line) {
+ Name* source, int line, int column) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -270,7 +271,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
profiles_->GetFunctionName(shared->DebugName()),
CodeEntry::kEmptyNamePrefix,
profiles_->GetName(source),
- line);
+ line,
+ column);
if (info) {
rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
}
@@ -435,8 +437,18 @@ void CpuProfiler::StartProcessorIfNotStarted() {
logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
Sampler* sampler = logger->sampler();
+#if V8_CC_MSVC && (_MSC_VER >= 1800)
+ // VS2013 reports "warning C4316: 'v8::internal::ProfilerEventsProcessor'
+ // : object allocated on the heap may not be aligned 64". We need to
+ // figure out if this is a legitimate warning or a compiler bug.
+ #pragma warning(push)
+ #pragma warning(disable:4316)
+#endif
processor_ = new ProfilerEventsProcessor(
generator_, sampler, sampling_interval_);
+#if V8_CC_MSVC && (_MSC_VER >= 1800)
+ #pragma warning(pop)
+#endif
is_profiling_ = true;
// Enumerate stuff we already have in the heap.
ASSERT(isolate_->heap()->HasBeenSetUp());
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 8aba5426d5..fcb9a67ddf 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -238,7 +238,7 @@ class CpuProfiler : public CodeEventListener {
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- Name* source, int line);
+ Name* source, int line, int column);
virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);
virtual void CodeMovingGCEvent() {}
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index 602ae166be..379631cb7c 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -30,8 +30,6 @@
#include "d8.h"
#include "d8-debug.h"
#include "debug-agent.h"
-#include "platform.h"
-#include "platform/socket.h"
namespace v8 {
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 424dbbb393..81c15ae742 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -245,7 +245,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
if (args[3]->IsNumber()) {
*total_timeout = args[3]->Int32Value();
} else {
- ThrowException(String::New("system: Argument 4 must be a number"));
+ args.GetIsolate()->ThrowException(
+ String::New("system: Argument 4 must be a number"));
return false;
}
}
@@ -253,7 +254,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
if (args[2]->IsNumber()) {
*read_timeout = args[2]->Int32Value();
} else {
- ThrowException(String::New("system: Argument 3 must be a number"));
+ args.GetIsolate()->ThrowException(
+ String::New("system: Argument 3 must be a number"));
return false;
}
}
@@ -456,7 +458,8 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<Array> command_args;
if (args.Length() > 1) {
if (!args[1]->IsArray()) {
- ThrowException(String::New("system: Argument 2 must be an array"));
+ args.GetIsolate()->ThrowException(
+ String::New("system: Argument 2 must be an array"));
return;
}
command_args = Handle<Array>::Cast(args[1]);
@@ -464,11 +467,13 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
command_args = Array::New(0);
}
if (command_args->Length() > ExecArgs::kMaxArgs) {
- ThrowException(String::New("Too many arguments to system()"));
+ args.GetIsolate()->ThrowException(
+ String::New("Too many arguments to system()"));
return;
}
if (args.Length() < 1) {
- ThrowException(String::New("Too few arguments to system()"));
+ args.GetIsolate()->ThrowException(
+ String::New("Too few arguments to system()"));
return;
}
@@ -483,11 +488,13 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
int stdout_fds[2];
if (pipe(exec_error_fds) != 0) {
- ThrowException(String::New("pipe syscall failed."));
+ args.GetIsolate()->ThrowException(
+ String::New("pipe syscall failed."));
return;
}
if (pipe(stdout_fds) != 0) {
- ThrowException(String::New("pipe syscall failed."));
+ args.GetIsolate()->ThrowException(
+ String::New("pipe syscall failed."));
return;
}
@@ -531,17 +538,17 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "chdir() takes one argument";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.chdir(): String conversion of argument failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
if (chdir(*directory) != 0) {
- ThrowException(String::New(strerror(errno)));
+ args.GetIsolate()->ThrowException(String::New(strerror(errno)));
return;
}
}
@@ -550,7 +557,7 @@ void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "umask() takes one argument";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
if (args[0]->IsNumber()) {
@@ -560,7 +567,7 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
} else {
const char* message = "umask() argument must be numeric";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
}
@@ -616,18 +623,18 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
mask = args[1]->Int32Value();
} else {
const char* message = "mkdirp() second argument must be numeric";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
} else if (args.Length() != 1) {
const char* message = "mkdirp() takes one or two arguments";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.mkdirp(): String conversion of argument failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
mkdirp(*directory, mask);
@@ -637,13 +644,13 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "rmdir() takes one or two arguments";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.rmdir(): String conversion of argument failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
rmdir(*directory);
@@ -653,7 +660,7 @@ void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2) {
const char* message = "setenv() takes two arguments";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
String::Utf8Value var(args[0]);
@@ -661,13 +668,13 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
if (*value == NULL) {
const char* message =
"os.setenv(): String conversion of variable contents failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
setenv(*var, *value, 1);
@@ -677,14 +684,14 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "unsetenv() takes one argument";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
String::Utf8Value var(args[0]);
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(String::New(message));
return;
}
unsetenv(*var);
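The hunks above all make the same mechanical change: d8's POSIX bindings stop calling the deprecated free function v8::ThrowException() and instead throw through the isolate carried by the callback arguments. A minimal sketch of the resulting callback shape against the 3.22 API used in this patch (the Greet callback is hypothetical):

    static void Greet(const v8::FunctionCallbackInfo<v8::Value>& args) {
      if (args.Length() != 1) {
        // Throw on the isolate that invoked us, not a process-global one.
        args.GetIsolate()->ThrowException(
            v8::String::New("greet() takes one argument"));
        return;
      }
      args.GetReturnValue().Set(args[0]);
    }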
diff --git a/deps/v8/src/d8-readline.cc b/deps/v8/src/d8-readline.cc
index 298518d72a..0226f31c0b 100644
--- a/deps/v8/src/d8-readline.cc
+++ b/deps/v8/src/d8-readline.cc
@@ -150,7 +150,7 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
static Persistent<Array> current_completions;
Isolate* isolate = read_line_editor.isolate_;
Locker lock(isolate);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Array> completions;
if (state == 0) {
Local<String> full_text = String::New(rl_line_buffer, rl_point);
@@ -167,8 +167,7 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
String::Utf8Value str(str_obj);
return strdup(*str);
} else {
- current_completions.Dispose(isolate);
- current_completions.Clear();
+ current_completions.Reset();
return NULL;
}
}
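Two 3.22 API migrations meet in this hunk: HandleScope now takes the isolate explicitly, and Persistent<T>::Reset() replaces the old Dispose(isolate) + Clear() pair, disposing the underlying global handle and clearing the Persistent in one call. A minimal sketch, with a hypothetical completions handle:

    void DropCompletions(v8::Isolate* isolate,
                         v8::Persistent<v8::Array>* completions) {
      v8::HandleScope scope(isolate);  // scopes are now isolate-bound
      completions->Reset();            // dispose + clear; no-op if empty
    }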
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index fb75d81c24..357c8a4899 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -49,6 +49,7 @@
#endif // !V8_SHARED
#ifdef V8_SHARED
+#include "../include/v8-defaults.h"
#include "../include/v8-testing.h"
#endif // V8_SHARED
@@ -66,6 +67,7 @@
#include "natives.h"
#include "platform.h"
#include "v8.h"
+#include "v8-defaults.h"
#endif // V8_SHARED
#if !defined(_WIN32) && !defined(_WIN64)
@@ -158,6 +160,7 @@ i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
i::Mutex Shell::context_mutex_;
+const i::TimeTicks Shell::kInitialTicks = i::TimeTicks::HighResolutionNow();
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
@@ -263,7 +266,8 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_current_ = 0;
data_->realm_switch_ = 0;
data_->realms_ = new Persistent<Context>[1];
- data_->realms_[0].Reset(data_->isolate_, Context::GetEntered());
+ data_->realms_[0].Reset(data_->isolate_,
+ data_->isolate_->GetEnteredContext());
data_->realm_shared_.Clear();
}
@@ -286,11 +290,20 @@ int PerIsolateData::RealmFind(Handle<Context> context) {
}
+#ifndef V8_SHARED
+// performance.now() returns a time stamp as double, measured in milliseconds.
+void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks;
+ args.GetReturnValue().Set(delta.InMillisecondsF());
+}
+#endif // V8_SHARED
+
+
// Realm.current() returns the index of the currently active realm.
void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- int index = data->RealmFind(Context::GetEntered());
+ int index = data->RealmFind(isolate->GetEnteredContext());
if (index == -1) return;
args.GetReturnValue().Set(index);
}
@@ -869,6 +882,13 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
RealmSharedGet, RealmSharedSet);
global_template->Set(String::New("Realm"), realm_template);
+#ifndef V8_SHARED
+ Handle<ObjectTemplate> performance_template = ObjectTemplate::New();
+ performance_template->Set(String::New("now"),
+ FunctionTemplate::New(PerformanceNow));
+ global_template->Set(String::New("performance"), performance_template);
+#endif // V8_SHARED
+
#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
AddOSMethods(os_templ);
@@ -939,8 +959,8 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
- factory->NewFixedArray(js_args.argc());
- for (int j = 0; j < js_args.argc(); j++) {
+ factory->NewFixedArray(js_args.argc);
+ for (int j = 0; j < js_args.argc; j++) {
i::Handle<i::String> arg =
factory->NewStringFromUtf8(i::CStrVector(js_args[j]));
arguments_array->set(j, *arg);
@@ -1228,6 +1248,7 @@ SourceGroup::~SourceGroup() {
void SourceGroup::Execute(Isolate* isolate) {
+ bool exception_was_thrown = false;
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
@@ -1236,7 +1257,8 @@ void SourceGroup::Execute(Isolate* isolate) {
Handle<String> file_name = String::New("unnamed");
Handle<String> source = String::New(argv_[i + 1]);
if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
- Shell::Exit(1);
+ exception_was_thrown = true;
+ break;
}
++i;
} else if (arg[0] == '-') {
@@ -1251,10 +1273,14 @@ void SourceGroup::Execute(Isolate* isolate) {
Shell::Exit(1);
}
if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
- Shell::Exit(1);
+ exception_was_thrown = true;
+ break;
}
}
}
+ if (exception_was_thrown != Shell::options.expected_to_throw) {
+ Shell::Exit(1);
+ }
}
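Taken together with the --throws flag parsed below, this reworks the shell's exit logic: instead of exiting the moment a source throws, Execute() records the fact, and the shell fails only when the outcome disagrees with options.expected_to_throw. A run like d8 --throws -e "throw 1" therefore exits cleanly, while the same script without the flag, or a clean script with it, exits with status 1.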
@@ -1410,6 +1436,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.dump_heap_constants = true;
argv[i] = NULL;
#endif
+ } else if (strcmp(argv[i], "--throws") == 0) {
+ options.expected_to_throw = true;
+ argv[i] = NULL;
}
#ifdef V8_SHARED
else if (strcmp(argv[i], "--dump-counters") == 0) {
@@ -1525,7 +1554,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
// Start preemption if threads have been created and preemption is enabled.
if (threads.length() > 0
&& options.use_preemption) {
- Locker::StartPreemption(options.preemption_interval);
+ Locker::StartPreemption(isolate, options.preemption_interval);
}
#endif // V8_SHARED
}
@@ -1543,7 +1572,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
if (threads.length() > 0 && options.use_preemption) {
Locker lock(isolate);
- Locker::StopPreemption();
+ Locker::StopPreemption(isolate);
}
#endif // V8_SHARED
return 0;
@@ -1648,6 +1677,7 @@ int Shell::Main(int argc, char* argv[]) {
#else
SetStandaloneFlagsViaCommandLine();
#endif
+ v8::SetDefaultResourceConstraintsForCurrentPlatform();
ShellArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
int result = 0;
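The new performance.now() builtin above is wired up with plain public API: an ObjectTemplate named "performance" carrying a FunctionTemplate for "now". A minimal sketch of the same wiring, with the clock stubbed out since i::TimeTicks is v8-internal (Now and AddPerformance are hypothetical names):

    static void Now(const v8::FunctionCallbackInfo<v8::Value>& args) {
      // A real shell reports milliseconds since startup from a
      // monotonic clock; 0.0 keeps the sketch self-contained.
      args.GetReturnValue().Set(0.0);
    }

    static void AddPerformance(v8::Handle<v8::ObjectTemplate> global) {
      v8::Handle<v8::ObjectTemplate> perf = v8::ObjectTemplate::New();
      perf->Set(v8::String::New("now"), v8::FunctionTemplate::New(Now));
      global->Set(v8::String::New("performance"), perf);
    }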
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index 15d342dece..097abc0465 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -31,7 +31,7 @@
'console%': '',
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
- 'v8_enable_i18n_support%': 0,
+ 'v8_enable_i18n_support%': 1,
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
@@ -81,13 +81,13 @@
}],
['v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
- '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+ '<(icu_gyp_path):icudata',
],
}],
],
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 1ae1bcfe6e..411dfdda3e 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -232,6 +232,7 @@ class ShellOptions {
interactive_shell(false),
test_shell(false),
dump_heap_constants(false),
+ expected_to_throw(false),
num_isolates(1),
isolate_sources(NULL) { }
@@ -256,6 +257,7 @@ class ShellOptions {
bool interactive_shell;
bool test_shell;
bool dump_heap_constants;
+ bool expected_to_throw;
int num_isolates;
SourceGroup* isolate_sources;
};
@@ -300,6 +302,8 @@ class Shell : public i::AllStatic {
Handle<String> command);
static void DispatchDebugMessages();
#endif // ENABLE_DEBUGGER_SUPPORT
+
+ static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args);
#endif // V8_SHARED
static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -391,6 +395,7 @@ class Shell : public i::AllStatic {
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
static i::Mutex context_mutex_;
+ static const i::TimeTicks kInitialTicks;
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript(Isolate* isolate);
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index 3efea06378..35b61d54ee 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -40,7 +40,7 @@ function log10(num) {
function ToInspectableObject(obj) {
if (!obj && typeof obj === 'object') {
- return void 0;
+ return UNDEFINED;
} else {
return Object(obj);
}
@@ -333,7 +333,7 @@ function DebugRequest(cmd_line) {
}
if ((cmd === undefined) || !cmd) {
- this.request_ = void 0;
+ this.request_ = UNDEFINED;
return;
}
@@ -492,7 +492,7 @@ function DebugRequest(cmd_line) {
case 'trace':
case 'tr':
// Return undefined to indicate command handled internally (no JSON).
- this.request_ = void 0;
+ this.request_ = UNDEFINED;
this.traceCommand_(args);
break;
@@ -500,7 +500,7 @@ function DebugRequest(cmd_line) {
case '?':
this.helpCommand_(args);
// Return undefined to indicate command handled internally (no JSON).
- this.request_ = void 0;
+ this.request_ = UNDEFINED;
break;
default:
@@ -2124,7 +2124,7 @@ function SimpleObjectToJSON_(object) {
var property_value_json;
switch (typeof property_value) {
case 'object':
- if (property_value === null) {
+ if (IS_NULL(property_value)) {
property_value_json = 'null';
} else if (typeof property_value.toJSONProtocol == 'function') {
property_value_json = property_value.toJSONProtocol(true);
@@ -2217,7 +2217,7 @@ function Stringify(x, depth) {
case "symbol":
return "Symbol(" + (x.name ? Stringify(x.name, depth) : "") + ")"
case "object":
- if (x === null) return "null";
+ if (IS_NULL(x)) return "null";
if (x.constructor && x.constructor.name === "Array") {
var elems = [];
for (var i = 0; i < x.length; ++i) {
@@ -2233,7 +2233,7 @@ function Stringify(x, depth) {
var props = [];
for (var name in x) {
var desc = Object.getOwnPropertyDescriptor(x, name);
- if (desc === void 0) continue;
+ if (IS_UNDEFINED(desc)) continue;
if ("value" in desc) {
props.push(name + ": " + Stringify(desc.value, depth - 1));
}
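UNDEFINED, NAN, IS_NULL and IS_UNDEFINED are not runtime globals: the natives in src/*.js are run through a build-time macro pass (src/macros.py, applied by the js2c step), where these names expand to constants and inlined comparisons. Replacing the ad-hoc void 0 and $NaN spellings with the macros keeps a single definition across all the builtin scripts.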
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 62999e9de6..1b128c3a0a 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -41,7 +41,7 @@ function ThrowDateTypeError() {
}
-var timezone_cache_time = $NaN;
+var timezone_cache_time = NAN;
var timezone_cache_timezone;
function LocalTimezone(t) {
@@ -66,10 +66,10 @@ function UTC(time) {
// ECMA 262 - 15.9.1.11
function MakeTime(hour, min, sec, ms) {
- if (!$isFinite(hour)) return $NaN;
- if (!$isFinite(min)) return $NaN;
- if (!$isFinite(sec)) return $NaN;
- if (!$isFinite(ms)) return $NaN;
+ if (!$isFinite(hour)) return NAN;
+ if (!$isFinite(min)) return NAN;
+ if (!$isFinite(sec)) return NAN;
+ if (!$isFinite(ms)) return NAN;
return TO_INTEGER(hour) * msPerHour
+ TO_INTEGER(min) * msPerMinute
+ TO_INTEGER(sec) * msPerSecond
@@ -90,7 +90,7 @@ function TimeInYear(year) {
// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
function MakeDay(year, month, date) {
- if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
+ if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return NAN;
// Convert to integer and map -0 to 0.
year = TO_INTEGER_MAP_MINUS_ZERO(year);
@@ -99,7 +99,7 @@ function MakeDay(year, month, date) {
if (year < kMinYear || year > kMaxYear ||
month < kMinMonth || month > kMaxMonth) {
- return $NaN;
+ return NAN;
}
// Now we rely on year and month being SMIs.
@@ -115,15 +115,15 @@ function MakeDate(day, time) {
// is no way that the time can be within range even after UTC
// conversion we return NaN immediately instead of relying on
// TimeClip to do it.
- if ($abs(time) > MAX_TIME_BEFORE_UTC) return $NaN;
+ if ($abs(time) > MAX_TIME_BEFORE_UTC) return NAN;
return time;
}
// ECMA 262 - 15.9.1.14
function TimeClip(time) {
- if (!$isFinite(time)) return $NaN;
- if ($abs(time) > MAX_TIME_MS) return $NaN;
+ if (!$isFinite(time)) return NAN;
+ if ($abs(time) > MAX_TIME_MS) return NAN;
return TO_INTEGER(time);
}
@@ -132,7 +132,7 @@ function TimeClip(time) {
// strings over and over again.
var Date_cache = {
// Cached time value.
- time: $NaN,
+ time: NAN,
// String input for which the cached time is valid.
string: null
};
@@ -269,7 +269,7 @@ var parse_buffer = $Array(8);
// ECMA 262 - 15.9.4.2
function DateParse(string) {
var arr = %DateParseString(ToString(string), parse_buffer);
- if (IS_NULL(arr)) return $NaN;
+ if (IS_NULL(arr)) return NAN;
var day = MakeDay(arr[0], arr[1], arr[2]);
var time = MakeTime(arr[3], arr[4], arr[5], arr[6]);
@@ -671,7 +671,7 @@ function DateGetYear() {
function DateSetYear(year) {
CHECK_DATE(this);
year = ToNumber(year);
- if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, $NaN);
+ if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, NAN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var t = LOCAL_DATE_VALUE(this);
@@ -746,12 +746,12 @@ function DateToJSON(key) {
function ResetDateCache() {
// Reset the timezone cache:
- timezone_cache_time = $NaN;
+ timezone_cache_time = NAN;
timezone_cache_timezone = undefined;
// Reset the date cache:
cache = Date_cache;
- cache.time = $NaN;
+ cache.time = NAN;
cache.string = null;
}
@@ -762,7 +762,7 @@ function SetUpDate() {
%CheckIsBootstrapping();
%SetCode($Date, DateConstructor);
- %FunctionSetPrototype($Date, new $Date($NaN));
+ %FunctionSetPrototype($Date, new $Date(NAN));
// Set up non-enumerable properties of the Date object itself.
InstallFunctions($Date, DONT_ENUM, $Array(
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index 19209d4b95..b159ae3b29 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -448,7 +448,7 @@ ScriptBreakPoint.prototype.set = function (script) {
// If the position is not found in the script (the script might be shorter
// than it used to be) just ignore it.
- if (position === null) return;
+ if (IS_NULL(position)) return;
// Create a break point object and set the break point.
break_point = MakeBreakPoint(position, this);
@@ -2064,7 +2064,7 @@ DebugCommandProcessor.resolveValue_ = function(value_description) {
} else if ("value" in value_description) {
return value_description.value;
} else if (value_description.type == UNDEFINED_TYPE) {
- return void 0;
+ return UNDEFINED;
} else if (value_description.type == NULL_TYPE) {
return null;
} else {
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 0496b8cb00..35970e5ee9 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -1793,10 +1793,14 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// function to be called and not the code for Builtins::FunctionApply or
// Builtins::FunctionCall. The receiver of call/apply is the target
// function.
- if (!holder.is_null() && holder->IsJSFunction() &&
- !JSFunction::cast(*holder)->IsBuiltin()) {
+ if (!holder.is_null() && holder->IsJSFunction()) {
Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
- Debug::FloodWithOneShot(js_function);
+ if (!js_function->IsBuiltin()) {
+ Debug::FloodWithOneShot(js_function);
+ } else if (js_function->shared()->bound()) {
+ // Handle Function.prototype.bind
+ Debug::FloodBoundFunctionWithOneShot(js_function);
+ }
}
} else {
Debug::FloodWithOneShot(function);
@@ -2102,6 +2106,7 @@ void Debug::PrepareForBreakPoints() {
if (!shared->allows_lazy_compilation()) continue;
if (!shared->script()->IsScript()) continue;
+ if (function->IsBuiltin()) continue;
if (shared->code()->gc_metadata() == active_code_marker) continue;
Code::Kind kind = function->code()->kind();
@@ -3131,8 +3136,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
v8::Local<v8::Function> fun =
v8::Local<v8::Function>::Cast(api_exec_state->Get(fun_name));
- v8::Handle<v8::Boolean> running =
- auto_continue ? v8::True() : v8::False();
+ v8::Handle<v8::Boolean> running = v8::Boolean::New(auto_continue);
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { running };
cmd_processor = v8::Local<v8::Object>::Cast(
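The HandleStepIn change above closes a stepping gap: a bound function is itself a builtin, so the old check skipped it entirely. Now the debugger recognizes js_function->shared()->bound() and calls FloodBoundFunctionWithOneShot(), so stepping into a call made through Function.prototype.bind() reaches the wrapped function instead of stepping over it.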
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 2b5f43ab49..8e71ea6705 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -38,6 +38,7 @@
#include "frames-inl.h"
#include "hashmap.h"
#include "platform.h"
+#include "platform/socket.h"
#include "string-stream.h"
#include "v8threads.h"
diff --git a/deps/v8/src/marking-thread.cc b/deps/v8/src/defaults.cc
index 58bca3662d..a03cf69b08 100644
--- a/deps/v8/src/marking-thread.cc
+++ b/deps/v8/src/defaults.cc
@@ -25,65 +25,46 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "marking-thread.h"
+// The GYP based build ends up defining USING_V8_SHARED when compiling this
+// file.
+#undef USING_V8_SHARED
+#include "../include/v8-defaults.h"
+#include "platform.h"
+#include "globals.h"
#include "v8.h"
-#include "isolate.h"
-#include "v8threads.h"
-
namespace v8 {
-namespace internal {
-
-MarkingThread::MarkingThread(Isolate* isolate)
- : Thread("MarkingThread"),
- isolate_(isolate),
- heap_(isolate->heap()),
- start_marking_semaphore_(0),
- end_marking_semaphore_(0),
- stop_semaphore_(0) {
- NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
- id_ = NoBarrier_AtomicIncrement(&id_counter_, 1);
-}
-
-
-Atomic32 MarkingThread::id_counter_ = -1;
-
-
-void MarkingThread::Run() {
- Isolate::SetIsolateThreadLocals(isolate_, NULL);
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
- while (true) {
- start_marking_semaphore_.Wait();
- if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_.Signal();
- return;
- }
-
- end_marking_semaphore_.Signal();
+bool ConfigureResourceConstraintsForCurrentPlatform(
+ ResourceConstraints* constraints) {
+ if (constraints == NULL) {
+ return false;
}
-}
-
-
-void MarkingThread::Stop() {
- Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
- start_marking_semaphore_.Signal();
- stop_semaphore_.Wait();
- Join();
-}
-
-void MarkingThread::StartMarking() {
- start_marking_semaphore_.Signal();
+ int lump_of_memory = (i::kPointerSize / 4) * i::MB;
+
+ // The young_space_size should be a power of 2 and old_generation_size should
+ // be a multiple of Page::kPageSize.
+#if V8_OS_ANDROID
+ constraints->set_max_young_space_size(8 * lump_of_memory);
+ constraints->set_max_old_space_size(256 * lump_of_memory);
+ constraints->set_max_executable_size(192 * lump_of_memory);
+#else
+ constraints->set_max_young_space_size(16 * lump_of_memory);
+ constraints->set_max_old_space_size(700 * lump_of_memory);
+ constraints->set_max_executable_size(256 * lump_of_memory);
+#endif
+ return true;
}
-void MarkingThread::WaitForMarkingThread() {
- end_marking_semaphore_.Wait();
+bool SetDefaultResourceConstraintsForCurrentPlatform() {
+ ResourceConstraints constraints;
+ if (!ConfigureResourceConstraintsForCurrentPlatform(&constraints))
+ return false;
+ return SetResourceConstraints(&constraints);
}
-} } // namespace v8::internal
+} // namespace v8
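The new defaults.cc gives embedders a two-step API: fetch per-platform constraints, optionally adjust them, then apply. d8 takes them wholesale via SetDefaultResourceConstraintsForCurrentPlatform() at startup (see the d8.cc hunk above); a minimal sketch of the adjusted variant, with sizes in bytes to match the lump_of_memory arithmetic (ApplyTunedConstraints and the override value are hypothetical):

    bool ApplyTunedConstraints() {
      v8::ResourceConstraints constraints;
      if (!v8::ConfigureResourceConstraintsForCurrentPlatform(&constraints))
        return false;
      // Hypothetical embedder override: cap the old generation lower
      // than the platform default.
      constraints.set_max_old_space_size(192 * 1024 * 1024);
      return v8::SetResourceConstraints(&constraints);
    }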
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index c979a534d8..84e80b9d9a 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -1494,7 +1494,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
intptr_t caller_arg_count = 0;
- bool arg_count_known = descriptor->stack_parameter_count_ == NULL;
+ bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
// Build the Arguments object for the caller's parameters and a pointer to it.
output_frame_offset -= kPointerSize;
@@ -1614,12 +1614,16 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
}
} else {
// Dispatch on the instance type of the object to be materialized.
- Handle<Map> map = Handle<Map>::cast(MaterializeNextValue());
+ // We also need to make sure that the representation of all fields
+ // in the given object are general enough to hold a tagged value.
+ Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
+ Handle<Map>::cast(MaterializeNextValue()), Representation::Tagged());
switch (map->instance_type()) {
case HEAP_NUMBER_TYPE: {
- Handle<HeapNumber> number =
- Handle<HeapNumber>::cast(MaterializeNextValue());
- materialized_objects_->Add(number);
+ Handle<HeapNumber> object = isolate_->factory()->NewHeapNumber(0.0);
+ materialized_objects_->Add(object);
+ Handle<Object> number = MaterializeNextValue();
+ object->set_value(number->Number());
materialization_value_index_ += kDoubleSize / kPointerSize - 1;
break;
}
@@ -1693,29 +1697,35 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
// output frames are used to materialize arguments objects later on they need
// to already contain valid heap numbers.
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
- HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+ HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
if (trace_) {
PrintF("Materialized a new heap number %p [%e] in slot %p\n",
reinterpret_cast<void*>(*num),
d.value(),
- d.slot_address());
+ d.destination());
}
- Memory::Object_at(d.slot_address()) = *num;
+ Memory::Object_at(d.destination()) = *num;
}
// Materialize all heap numbers required for arguments/captured objects.
- for (int i = 0; i < values.length(); i++) {
- if (!values.at(i)->IsTheHole()) continue;
- double double_value = deferred_objects_double_values_[i];
- Handle<Object> num = isolate_->factory()->NewNumber(double_value);
+ for (int i = 0; i < deferred_objects_double_values_.length(); i++) {
+ HeapNumberMaterializationDescriptor<int> d =
+ deferred_objects_double_values_[i];
+ Handle<Object> num = isolate_->factory()->NewNumber(d.value());
if (trace_) {
- PrintF("Materialized a new heap number %p [%e] for object\n",
- reinterpret_cast<void*>(*num), double_value);
+ PrintF("Materialized a new heap number %p [%e] for object at %d\n",
+ reinterpret_cast<void*>(*num),
+ d.value(),
+ d.destination());
}
- values.Set(i, num);
+ ASSERT(values.at(d.destination())->IsTheHole());
+ values.Set(d.destination(), num);
}
+ // Play it safe and clear all object double values before we continue.
+ deferred_objects_double_values_.Clear();
+
// Materialize arguments/captured objects.
if (!deferred_objects_.is_empty()) {
List<Handle<Object> > materialized_objects(deferred_objects_.length());
@@ -1765,11 +1775,11 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
Address parameters_bottom = parameters_top + parameters_size;
Address expressions_bottom = expressions_top + expressions_size;
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
- HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+ HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
// Check if the heap number to materialize actually belongs to the frame
// being extracted.
- Address slot = d.slot_address();
+ Address slot = d.destination();
if (parameters_top <= slot && slot < parameters_bottom) {
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
@@ -1781,7 +1791,7 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
"for parameter slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
- d.slot_address(),
+ d.destination(),
index);
}
@@ -1797,7 +1807,7 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
"for expression slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
- d.slot_address(),
+ d.destination(),
index);
}
@@ -2337,85 +2347,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
-void Deoptimizer::PatchInterruptCode(Isolate* isolate,
- Code* unoptimized) {
- DisallowHeapAllocation no_gc;
- Code* replacement_code =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-
- // Iterate over the back edge table and patch every interrupt
- // call to an unconditional call to the replacement code.
- int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
-
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
- !back_edges.Done();
- back_edges.Next()) {
- if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
- ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
- unoptimized,
- back_edges.pc()));
- PatchInterruptCodeAt(unoptimized,
- back_edges.pc(),
- replacement_code);
- }
- }
-
- unoptimized->set_back_edges_patched_for_osr(true);
- ASSERT(Deoptimizer::VerifyInterruptCode(
- isolate, unoptimized, loop_nesting_level));
-}
-
-
-void Deoptimizer::RevertInterruptCode(Isolate* isolate,
- Code* unoptimized) {
- DisallowHeapAllocation no_gc;
- Code* interrupt_code =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
-
- // Iterate over the back edge table and revert the patched interrupt calls.
- ASSERT(unoptimized->back_edges_patched_for_osr());
- int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
-
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
- !back_edges.Done();
- back_edges.Next()) {
- if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
- ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
- unoptimized,
- back_edges.pc()));
- RevertInterruptCodeAt(unoptimized, back_edges.pc(), interrupt_code);
- }
- }
-
- unoptimized->set_back_edges_patched_for_osr(false);
- unoptimized->set_allow_osr_at_loop_nesting_level(0);
- // Assert that none of the back edges are patched anymore.
- ASSERT(Deoptimizer::VerifyInterruptCode(isolate, unoptimized, -1));
-}
-
-
-#ifdef DEBUG
-bool Deoptimizer::VerifyInterruptCode(Isolate* isolate,
- Code* unoptimized,
- int loop_nesting_level) {
- DisallowHeapAllocation no_gc;
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
- !back_edges.Done();
- back_edges.Next()) {
- uint32_t loop_depth = back_edges.loop_depth();
- CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
- // Assert that all back edges for shallower loops (and only those)
- // have already been patched.
- CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
- GetInterruptPatchState(isolate,
- unoptimized,
- back_edges.pc()) != NOT_PATCHED);
- }
- return true;
-}
-#endif // DEBUG
-
-
unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned fixed_size = ComputeFixedSize(function_);
// The fp-to-sp delta already takes the context and the function
@@ -2484,18 +2415,19 @@ void Deoptimizer::AddObjectDuplication(intptr_t slot, int object_index) {
void Deoptimizer::AddObjectTaggedValue(intptr_t value) {
deferred_objects_tagged_values_.Add(reinterpret_cast<Object*>(value));
- deferred_objects_double_values_.Add(isolate()->heap()->nan_value()->value());
}
void Deoptimizer::AddObjectDoubleValue(double value) {
deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
- deferred_objects_double_values_.Add(value);
+ HeapNumberMaterializationDescriptor<int> value_desc(
+ deferred_objects_tagged_values_.length() - 1, value);
+ deferred_objects_double_values_.Add(value_desc);
}
void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
- HeapNumberMaterializationDescriptor value_desc(
+ HeapNumberMaterializationDescriptor<Address> value_desc(
reinterpret_cast<Address>(slot_address), value);
deferred_heap_numbers_.Add(value_desc);
}
@@ -2814,46 +2746,11 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
const char* Translation::StringFor(Opcode opcode) {
+#define TRANSLATION_OPCODE_CASE(item) case item: return #item;
switch (opcode) {
- case BEGIN:
- return "BEGIN";
- case JS_FRAME:
- return "JS_FRAME";
- case ARGUMENTS_ADAPTOR_FRAME:
- return "ARGUMENTS_ADAPTOR_FRAME";
- case CONSTRUCT_STUB_FRAME:
- return "CONSTRUCT_STUB_FRAME";
- case GETTER_STUB_FRAME:
- return "GETTER_STUB_FRAME";
- case SETTER_STUB_FRAME:
- return "SETTER_STUB_FRAME";
- case COMPILED_STUB_FRAME:
- return "COMPILED_STUB_FRAME";
- case REGISTER:
- return "REGISTER";
- case INT32_REGISTER:
- return "INT32_REGISTER";
- case UINT32_REGISTER:
- return "UINT32_REGISTER";
- case DOUBLE_REGISTER:
- return "DOUBLE_REGISTER";
- case STACK_SLOT:
- return "STACK_SLOT";
- case INT32_STACK_SLOT:
- return "INT32_STACK_SLOT";
- case UINT32_STACK_SLOT:
- return "UINT32_STACK_SLOT";
- case DOUBLE_STACK_SLOT:
- return "DOUBLE_STACK_SLOT";
- case LITERAL:
- return "LITERAL";
- case DUPLICATED_OBJECT:
- return "DUPLICATED_OBJECT";
- case ARGUMENTS_OBJECT:
- return "ARGUMENTS_OBJECT";
- case CAPTURED_OBJECT:
- return "CAPTURED_OBJECT";
+ TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE)
}
+#undef TRANSLATION_OPCODE_CASE
UNREACHABLE();
return "";
}
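Translation::StringFor() now derives its string table from the same X-macro list that defines the opcode enum (see deoptimizer.h below), so the two can no longer drift apart when an opcode is added. The pattern in miniature, with hypothetical names:

    #define COLOR_LIST(V) \
      V(RED)              \
      V(GREEN)            \
      V(BLUE)

    #define DECLARE_ENUM(item) item,
    enum Color { COLOR_LIST(DECLARE_ENUM) LAST = BLUE };
    #undef DECLARE_ENUM

    #define CASE(item) case item: return #item;
    static const char* ColorName(Color c) {
      switch (c) { COLOR_LIST(CASE) }
      return "";
    }
    #undef CASE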
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 7ee5908f76..4e9d281ea5 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -60,17 +60,18 @@ class FrameDescription;
class TranslationIterator;
class DeoptimizedFrameInfo;
+template<typename T>
class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
public:
- HeapNumberMaterializationDescriptor(Address slot_address, double val)
- : slot_address_(slot_address), val_(val) { }
+ HeapNumberMaterializationDescriptor(T destination, double value)
+ : destination_(destination), value_(value) { }
- Address slot_address() const { return slot_address_; }
- double value() const { return val_; }
+ T destination() const { return destination_; }
+ double value() const { return value_; }
private:
- Address slot_address_;
- double val_;
+ T destination_;
+ double value_;
};
@@ -131,11 +132,6 @@ class Deoptimizer : public Malloced {
DEBUGGER
};
- enum InterruptPatchState {
- NOT_PATCHED,
- PATCHED_FOR_OSR
- };
-
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
struct JumpTableEntry {
@@ -213,39 +209,6 @@ class Deoptimizer : public Malloced {
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
- // Patch all interrupts with allowed loop depth in the unoptimized code to
- // unconditionally call replacement_code.
- static void PatchInterruptCode(Isolate* isolate,
- Code* unoptimized_code);
-
- // Patch the interrupt at the instruction before pc_after in
- // the unoptimized code to unconditionally call replacement_code.
- static void PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code);
-
- // Change all patched interrupts patched in the unoptimized code
- // back to normal interrupts.
- static void RevertInterruptCode(Isolate* isolate,
- Code* unoptimized_code);
-
- // Change patched interrupt in the unoptimized code
- // back to a normal interrupt.
- static void RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code);
-
-#ifdef DEBUG
- static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after);
-
- // Verify that all back edges of a certain loop depth are patched.
- static bool VerifyInterruptCode(Isolate* isolate,
- Code* unoptimized_code,
- int loop_nesting_level);
-#endif // DEBUG
-
~Deoptimizer();
void MaterializeHeapObjects(JavaScriptFrameIterator* it);
@@ -469,9 +432,10 @@ class Deoptimizer : public Malloced {
// Deferred values to be materialized.
List<Object*> deferred_objects_tagged_values_;
- List<double> deferred_objects_double_values_;
+ List<HeapNumberMaterializationDescriptor<int> >
+ deferred_objects_double_values_;
List<ObjectMaterializationDescriptor> deferred_objects_;
- List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
+ List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;
// Output frame information. Only used during heap object materialization.
List<Handle<JSFunction> > jsframe_functions_;
@@ -542,7 +506,15 @@ class FrameDescription {
void SetCallerFp(unsigned offset, intptr_t value);
intptr_t GetRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(registers_));
+#if DEBUG
+ // This convoluted ASSERT is needed to work around a gcc problem that
+ // improperly detects an array bounds overflow in optimized debug builds
+ // when using a plain ASSERT.
+ if (n >= ARRAY_SIZE(registers_)) {
+ ASSERT(false);
+ return 0;
+ }
+#endif
return registers_[n];
}
@@ -717,29 +689,36 @@ class TranslationIterator BASE_EMBEDDED {
};
+#define TRANSLATION_OPCODE_LIST(V) \
+ V(BEGIN) \
+ V(JS_FRAME) \
+ V(CONSTRUCT_STUB_FRAME) \
+ V(GETTER_STUB_FRAME) \
+ V(SETTER_STUB_FRAME) \
+ V(ARGUMENTS_ADAPTOR_FRAME) \
+ V(COMPILED_STUB_FRAME) \
+ V(DUPLICATED_OBJECT) \
+ V(ARGUMENTS_OBJECT) \
+ V(CAPTURED_OBJECT) \
+ V(REGISTER) \
+ V(INT32_REGISTER) \
+ V(UINT32_REGISTER) \
+ V(DOUBLE_REGISTER) \
+ V(STACK_SLOT) \
+ V(INT32_STACK_SLOT) \
+ V(UINT32_STACK_SLOT) \
+ V(DOUBLE_STACK_SLOT) \
+ V(LITERAL)
+
+
class Translation BASE_EMBEDDED {
public:
+#define DECLARE_TRANSLATION_OPCODE_ENUM(item) item,
enum Opcode {
- BEGIN,
- JS_FRAME,
- CONSTRUCT_STUB_FRAME,
- GETTER_STUB_FRAME,
- SETTER_STUB_FRAME,
- ARGUMENTS_ADAPTOR_FRAME,
- COMPILED_STUB_FRAME,
- DUPLICATED_OBJECT,
- ARGUMENTS_OBJECT,
- CAPTURED_OBJECT,
- REGISTER,
- INT32_REGISTER,
- UINT32_REGISTER,
- DOUBLE_REGISTER,
- STACK_SLOT,
- INT32_STACK_SLOT,
- UINT32_STACK_SLOT,
- DOUBLE_STACK_SLOT,
- LITERAL
+ TRANSLATION_OPCODE_LIST(DECLARE_TRANSLATION_OPCODE_ENUM)
+ LAST = LITERAL
};
+#undef DECLARE_TRANSLATION_OPCODE_ENUM
Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
Zone* zone)
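Making HeapNumberMaterializationDescriptor a template over its destination type lets the deoptimizer reuse one descriptor for both cases seen in deoptimizer.cc above: frame slots addressed by a raw Address, and captured-object fields addressed by an index into the tagged-values list. The shape in isolation (renamed here as a sketch):

    template <typename T>
    class MaterializationDescriptor {
     public:
      MaterializationDescriptor(T destination, double value)
          : destination_(destination), value_(value) {}
      T destination() const { return destination_; }
      double value() const { return value_; }
     private:
      T destination_;  // Address for stack slots, int for list indices
      double value_;
    };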
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index dd620fb345..d7898ddcd9 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -250,7 +250,7 @@ static int DecodeIt(Isolate* isolate,
if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
out.AddFormatted(", argc = %d", code->arguments_count());
}
- } else if (kind == Code::STUB) {
+ } else if (kind == Code::STUB || kind == Code::HANDLER) {
// Reverse lookup required as the minor key cannot be retrieved
// from the code object.
Object* obj = heap->code_stubs()->SlowReverseLookup(code);
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 89621cb369..0b745c4505 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -792,7 +792,7 @@ class ElementsAccessorBase : public ElementsAccessor {
FixedArray* to,
FixedArrayBase* from) {
int len0 = to->length();
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < len0; i++) {
ASSERT(!to->get(i)->IsTheHole());
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 5fd821b9c0..9fdb194e42 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -75,7 +75,7 @@ v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
void ExternalizeStringExtension::Externalize(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1 || !args[0]->IsString()) {
- v8::ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::New(
"First parameter to externalizeString() must be a string."));
return;
}
@@ -84,7 +84,7 @@ void ExternalizeStringExtension::Externalize(
if (args[1]->IsBoolean()) {
force_two_byte = args[1]->BooleanValue();
} else {
- v8::ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::New(
"Second parameter to externalizeString() must be a boolean."));
return;
}
@@ -92,7 +92,7 @@ void ExternalizeStringExtension::Externalize(
bool result = false;
Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
if (string->IsExternalString()) {
- v8::ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::New(
"externalizeString() can't externalize twice."));
return;
}
@@ -120,7 +120,8 @@ void ExternalizeStringExtension::Externalize(
if (!result) delete resource;
}
if (!result) {
- v8::ThrowException(v8::String::New("externalizeString() failed."));
+ args.GetIsolate()->ThrowException(
+ v8::String::New("externalizeString() failed."));
return;
}
}
@@ -129,7 +130,7 @@ void ExternalizeStringExtension::Externalize(
void ExternalizeStringExtension::IsAscii(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- v8::ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::New(
"isAsciiString() requires a single string argument."));
return;
}
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index acbaf3c862..1dd246fc48 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -79,6 +79,21 @@ Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
}
+Handle<ConstantPoolArray> Factory::NewConstantPoolArray(
+ int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
+ number_of_int32_entries > 0);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateConstantPoolArray(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries),
+ ConstantPoolArray);
+}
+
+
Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
@@ -126,6 +141,18 @@ Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
}
+Handle<WeakHashTable> Factory::NewWeakHashTable(int at_least_space_for) {
+ ASSERT(0 <= at_least_space_for);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ WeakHashTable::Allocate(isolate()->heap(),
+ at_least_space_for,
+ WeakHashTable::USE_DEFAULT_MINIMUM_CAPACITY,
+ TENURED),
+ WeakHashTable);
+}
+
+
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
int slack) {
ASSERT(0 <= number_of_descriptors);
@@ -511,15 +538,22 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
}
-Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) {
- AllowDeferredHandleDereference convert_to_cell;
+Handle<PropertyCell> Factory::NewPropertyCellWithHole() {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocatePropertyCell(*value),
+ isolate()->heap()->AllocatePropertyCell(),
PropertyCell);
}
+Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ Handle<PropertyCell> cell = NewPropertyCellWithHole();
+ PropertyCell::SetValueInferType(cell, value);
+ return cell;
+}
+
+
Handle<AllocationSite> Factory::NewAllocationSite() {
CALL_HEAP_FUNCTION(
isolate(),
@@ -598,8 +632,11 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array,
- int new_length) {
- CALL_HEAP_FUNCTION(isolate(), array->CopySize(new_length), FixedArray);
+ int new_length,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(),
+ array->CopySize(new_length, pretenure),
+ FixedArray);
}
@@ -609,6 +646,12 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
}
+Handle<ConstantPoolArray> Factory::CopyConstantPoolArray(
+ Handle<ConstantPoolArray> array) {
+ CALL_HEAP_FUNCTION(isolate(), array->Copy(), ConstantPoolArray);
+}
+
+
Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Map> function_map,
@@ -972,10 +1015,12 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_ref,
bool immovable,
- bool crankshafted) {
+ bool crankshafted,
+ int prologue_offset) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->CreateCode(
- desc, flags, self_ref, immovable, crankshafted),
+ desc, flags, self_ref, immovable, crankshafted,
+ prologue_offset),
Code);
}
@@ -1016,14 +1061,79 @@ Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
}
-Handle<GlobalObject> Factory::NewGlobalObject(
- Handle<JSFunction> constructor) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateGlobalObject(*constructor),
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dict->GetIsolate(),
+ dict->Add(*name, *value, details),
+ NameDictionary);
+}
+
+
+static Handle<GlobalObject> NewGlobalObjectFromMap(Isolate* isolate,
+ Handle<Map> map) {
+ CALL_HEAP_FUNCTION(isolate,
+ isolate->heap()->Allocate(*map, OLD_POINTER_SPACE),
GlobalObject);
}
+Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
+ ASSERT(constructor->has_initial_map());
+ Handle<Map> map(constructor->initial_map());
+ ASSERT(map->is_dictionary_map());
+
+ // Make sure no field properties are described in the initial map.
+ // This guarantees us that normalizing the properties does not
+ // require us to change property values to PropertyCells.
+ ASSERT(map->NextFreePropertyIndex() == 0);
+
+ // Make sure we don't have a ton of pre-allocated slots in the
+ // global objects. They will be unused once we normalize the object.
+ ASSERT(map->unused_property_fields() == 0);
+ ASSERT(map->inobject_properties() == 0);
+
+ // Initial size of the backing store to avoid resize of the storage during
+ // bootstrapping. The size differs between the JS global object and the
+ // builtins object.
+ int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
+
+ // Allocate a dictionary object for backing storage.
+ int at_least_space_for = map->NumberOfOwnDescriptors() * 2 + initial_size;
+ Handle<NameDictionary> dictionary = NewNameDictionary(at_least_space_for);
+
+ // The global object might be created from an object template with accessors.
+ // Fill these accessors into the dictionary.
+ Handle<DescriptorArray> descs(map->instance_descriptors());
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
+ PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
+ Handle<Name> name(descs->GetKey(i));
+ Handle<Object> value(descs->GetCallbacksObject(i), isolate());
+ Handle<PropertyCell> cell = NewPropertyCell(value);
+ NameDictionaryAdd(dictionary, name, cell, d);
+ }
+
+ // Allocate the global object and initialize it with the backing store.
+ Handle<GlobalObject> global = NewGlobalObjectFromMap(isolate(), map);
+ isolate()->heap()->InitializeJSObjectFromMap(*global, *dictionary, *map);
+
+ // Create a new map for the global object.
+ Handle<Map> new_map = Map::CopyDropDescriptors(map);
+ new_map->set_dictionary_map(true);
+
+ // Set up the global object as a normalized object.
+ global->set_map(*new_map);
+ global->set_properties(*dictionary);
+
+ // Make sure result is a global object with properties in dictionary.
+ ASSERT(global->IsGlobalObject() && !global->HasFastProperties());
+ return global;
+}
+
Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map,
PretenureFlag pretenure,
@@ -1083,16 +1193,6 @@ void Factory::SetContent(Handle<JSArray> array,
}
-void Factory::EnsureCanContainElements(Handle<JSArray> array,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->EnsureCanContainElements(*elements, length, mode));
-}
-
-
Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() {
Handle<JSFunction> array_buffer_fun(
isolate()->context()->native_context()->array_buffer_fun());
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 1bdf474337..ee25bf23d8 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -59,6 +59,11 @@ class Factory {
int size,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<ConstantPoolArray> NewConstantPoolArray(
+ int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries);
+
Handle<SeededNumberDictionary> NewSeededNumberDictionary(
int at_least_space_for);
@@ -71,6 +76,8 @@ class Factory {
Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
+ Handle<WeakHashTable> NewWeakHashTable(int at_least_space_for);
+
Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors,
int slack = 0);
Handle<DeoptimizationInputData> NewDeoptimizationInputData(
@@ -241,6 +248,8 @@ class Factory {
Handle<Cell> NewCell(Handle<Object> value);
+ Handle<PropertyCell> NewPropertyCellWithHole();
+
Handle<PropertyCell> NewPropertyCell(Handle<Object> value);
Handle<AllocationSite> NewAllocationSite();
@@ -265,11 +274,15 @@ class Factory {
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
- int new_length);
+ int new_length,
+ PretenureFlag pretenure = NOT_TENURED);
Handle<FixedDoubleArray> CopyFixedDoubleArray(
Handle<FixedDoubleArray> array);
+ Handle<ConstantPoolArray> CopyConstantPoolArray(
+ Handle<ConstantPoolArray> array);
+
// Numbers (e.g. literals) are pretenured by the parser.
Handle<Object> NewNumber(double value,
PretenureFlag pretenure = NOT_TENURED);
@@ -295,7 +308,7 @@ class Factory {
Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure = NOT_TENURED);
- // Global objects are pretenured.
+ // Global objects are pretenured and initialized based on a constructor.
Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
@@ -328,11 +341,6 @@ class Factory {
void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
- void EnsureCanContainElements(Handle<JSArray> array,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode);
-
Handle<JSArrayBuffer> NewJSArrayBuffer();
Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
@@ -372,7 +380,8 @@ class Factory {
Code::Flags flags,
Handle<Object> self_reference,
bool immovable = false,
- bool crankshafted = false);
+ bool crankshafted = false,
+ int prologue_offset = Code::kPrologueOffsetNotSet);
Handle<Code> CopyCode(Handle<Code> code);
@@ -462,7 +471,15 @@ class Factory {
&isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
}
ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR_ACCESSOR
+#undef ROOT_ACCESSOR
+
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ inline Handle<Map> name##_map() { \
+ return Handle<Map>(BitCast<Map**>( \
+ &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
+ }
+ STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
#define STRING_ACCESSOR(name, str) \
inline Handle<String> name() { \
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 08cd8304e4..865413e70d 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -90,44 +90,34 @@
#define DEFINE_implication(whenflag, thenflag)
#endif
+#define COMMA ,
#ifdef FLAG_MODE_DECLARE
// Structure used to hold a collection of arguments to the JavaScript code.
-#define JSARGUMENTS_INIT {{}}
struct JSArguments {
public:
- inline int argc() const {
- return static_cast<int>(storage_[0]);
- }
- inline const char** argv() const {
- return reinterpret_cast<const char**>(storage_[1]);
- }
inline const char*& operator[] (int idx) const {
- return argv()[idx];
- }
- inline JSArguments& operator=(JSArguments args) {
- set_argc(args.argc());
- set_argv(args.argv());
- return *this;
+ return argv[idx];
}
static JSArguments Create(int argc, const char** argv) {
JSArguments args;
- args.set_argc(argc);
- args.set_argv(argv);
+ args.argc = argc;
+ args.argv = argv;
return args;
}
-private:
- void set_argc(int argc) {
- storage_[0] = argc;
- }
- void set_argv(const char** argv) {
- storage_[1] = reinterpret_cast<AtomicWord>(argv);
+ int argc;
+ const char** argv;
+};
+
+struct MaybeBoolFlag {
+ static MaybeBoolFlag Create(bool has_value, bool value) {
+ MaybeBoolFlag flag;
+ flag.has_value = has_value;
+ flag.value = value;
+ return flag;
}
-public:
- // Contains argc and argv. Unfortunately we have to store these two fields
- // into a single one to avoid making the initialization macro (which would be
- // "{ 0, NULL }") contain a coma.
- AtomicWord storage_[2];
+ bool has_value;
+ bool value;
};
#endif
@@ -148,10 +138,13 @@ public:
#endif
#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
+#define DEFINE_maybe_bool(nam, cmt) FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, \
+ { false COMMA false }, cmt)
#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
#define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
-#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt)
+#define DEFINE_args(nam, cmt) FLAG(ARGS, JSArguments, nam, \
+ { 0 COMMA NULL }, cmt)
#define DEFINE_ALIAS_bool(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam)
#define DEFINE_ALIAS_int(alias, nam) FLAG_ALIAS(INT, int, alias, nam)
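JSArguments and the new MaybeBoolFlag are initialized through the FLAG macro, whose argument parser would split a braced initializer at its bare comma; hiding the comma behind the COMMA macro defers its expansion until after argument substitution. A minimal reproduction of the trick, with hypothetical names:

    #define COMMA ,

    struct Pair { int a; int b; };

    // Two parameters: DEFINE_PAIR(p, { 1, 2 }) would be rejected as
    // three arguments, but COMMA survives argument scanning as a
    // single token and expands to ',' afterwards.
    #define DEFINE_PAIR(name, init) Pair name = init;

    DEFINE_PAIR(p, { 1 COMMA 2 })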
@@ -183,17 +176,13 @@ DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
DEFINE_bool(harmony_observation, false,
"enable harmony object observation (implies harmony collections")
-DEFINE_bool(harmony_typed_arrays, true,
- "enable harmony typed arrays")
-DEFINE_bool(harmony_array_buffer, true,
- "enable harmony array buffer")
-DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
DEFINE_bool(harmony_numeric_literals, false,
"enable harmony numeric literals (0o77, 0b11)")
DEFINE_bool(harmony_strings, false, "enable harmony string")
DEFINE_bool(harmony_arrays, false, "enable harmony arrays")
+DEFINE_bool(harmony_maths, false, "enable harmony math functions")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
@@ -206,20 +195,21 @@ DEFINE_implication(harmony, harmony_iteration)
DEFINE_implication(harmony, harmony_numeric_literals)
DEFINE_implication(harmony, harmony_strings)
DEFINE_implication(harmony, harmony_arrays)
+DEFINE_implication(harmony, harmony_maths)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
-// TODO[dslomov] add harmony => harmony_typed_arrays
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
-DEFINE_bool(clever_optimizations,
- true,
+DEFINE_bool(clever_optimizations, true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenuring, true, "allocate objects in old space")
// TODO(hpayer): We will remove this flag as soon as we have pretenuring
// support for specific allocation sites.
DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
+DEFINE_bool(allocation_site_pretenuring, false,
+ "pretenure with allocation sites")
DEFINE_bool(track_fields, true, "track fields with only smi values")
DEFINE_bool(track_double_fields, true, "track fields with double values")
DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
@@ -229,6 +219,11 @@ DEFINE_implication(track_heap_object_fields, track_fields)
DEFINE_implication(track_computed_fields, track_fields)
DEFINE_bool(smi_binop, true, "support smi representation in binary operations")
+// Flags for optimization types.
+DEFINE_bool(optimize_for_size, false,
+ "Enables optimizations which favor memory size over execution "
+ "speed.")
+
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_bool(string_slices, true, "use string slices")
@@ -240,7 +235,7 @@ DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
-DEFINE_bool(use_escape_analysis, false, "use hydrogen escape analysis")
+DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis")
DEFINE_bool(use_allocation_folding, true, "use allocation folding")
DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_int(max_inlined_source_size, 600,
@@ -251,16 +246,17 @@ DEFINE_int(max_inlined_nodes_cumulative, 400,
"maximum cumulative number of AST nodes considered for inlining")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions")
-DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
- true,
+DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,
"crankshaft harvests type feedback from stub cache")
DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
+DEFINE_bool(trace_check_elimination, false, "trace check elimination phase")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
DEFINE_string(trace_hydrogen_filter, "*", "hydrogen tracing filter")
DEFINE_bool(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs")
DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
+DEFINE_bool(trace_load_elimination, false, "trace load elimination")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_all_uses, false, "trace all use positions")
DEFINE_bool(trace_range, false, "trace range analysis")
@@ -274,11 +270,9 @@ DEFINE_bool(trace_migration, false, "trace object migration")
DEFINE_bool(trace_generalization, false, "trace map generalization")
DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
DEFINE_bool(stress_environments, false, "environment for every instruction")
-DEFINE_int(deopt_every_n_times,
- 0,
+DEFINE_int(deopt_every_n_times, 0,
"deoptimize every n times a deopt point is passed")
-DEFINE_int(deopt_every_n_garbage_collections,
- 0,
+DEFINE_int(deopt_every_n_garbage_collections, 0,
"deoptimize every n garbage collections")
DEFINE_bool(print_deopt_stress, false, "print number of possible deopt points")
DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
@@ -295,11 +289,12 @@ DEFINE_bool(array_index_dehoisting, true,
"perform array index dehoisting")
DEFINE_bool(analyze_environment_liveness, true,
"analyze liveness of environment slots and zap dead values")
+DEFINE_bool(load_elimination, false, "use load elimination")
+DEFINE_bool(check_elimination, false, "use check elimination")
DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
DEFINE_bool(fold_constants, true, "use constant folding")
DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
-DEFINE_bool(unreachable_code_elimination, false,
- "eliminate unreachable code (hidden behind soft deopts)")
+DEFINE_bool(unreachable_code_elimination, true, "eliminate unreachable code")
DEFINE_bool(track_allocation_sites, true,
"Use allocation site info to reduce transitions")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
@@ -316,6 +311,8 @@ DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
+DEFINE_int(escape_analysis_iterations, 1,
+ "maximum number of escape analysis fix-point iterations")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
@@ -331,8 +328,11 @@ DEFINE_int(concurrent_recompilation_queue_length, 8,
"the length of the concurrent compilation queue")
DEFINE_int(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
+DEFINE_bool(block_concurrent_recompilation, false,
+ "block queued jobs until released")
DEFINE_bool(concurrent_osr, false,
"concurrent on-stack replacement")
+DEFINE_implication(concurrent_osr, concurrent_recompilation)
DEFINE_bool(omit_map_checks_for_leaf_maps, true,
"do not emit check maps for constant values that have a leaf map, "
@@ -404,8 +404,7 @@ DEFINE_bool(enable_vldr_imm, false,
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
DEFINE_bool(expose_gc, false, "expose gc extension")
-DEFINE_string(expose_gc_as,
- NULL,
+DEFINE_string(expose_gc_as, NULL,
"expose gc extension under the specified name")
DEFINE_implication(expose_gc_as, expose_gc)
DEFINE_bool(expose_externalize_string, false,
@@ -426,8 +425,7 @@ DEFINE_bool(stack_trace_on_abort, true,
DEFINE_bool(trace_codegen, false,
"print name of functions for which code is generated")
DEFINE_bool(trace, false, "trace function calls")
-DEFINE_bool(mask_constants_with_cookie,
- true,
+DEFINE_bool(mask_constants_with_cookie, true,
"use random jit cookie to mask large constants")
// codegen.cc
@@ -515,6 +513,8 @@ DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(weak_embedded_maps_in_optimized_code, true,
"make maps embedded in optimized code weak")
+DEFINE_bool(weak_embedded_objects_in_optimized_code, true,
+ "make objects embedded in optimized code weak")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again (during full gc)")
DEFINE_bool(flush_code_incrementally, true,
@@ -533,18 +533,21 @@ DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
DEFINE_int(sweeper_threads, 0,
"number of parallel and concurrent sweeping threads")
-DEFINE_bool(parallel_marking, false, "enable parallel marking")
-DEFINE_int(marking_threads, 0, "number of parallel marking threads")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
+
+// heap-snapshot-generator.cc
+DEFINE_bool(heap_profiler_trace_objects, false,
+ "Dump heap object allocations/movements/size_updates")
+
+
// v8.cc
DEFINE_bool(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
-DEFINE_bool(js_accessor_ics, false, "create ics for js accessors")
// macro-assembler-ia32.cc
DEFINE_bool(native_code_counters, false,
@@ -592,15 +595,17 @@ DEFINE_bool(trace_exception, false,
"print stack trace when throwing exceptions")
DEFINE_bool(preallocate_message_memory, false,
"preallocate some memory to build stack traces.")
-DEFINE_bool(randomize_hashes,
- true,
+DEFINE_bool(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
-DEFINE_int(hash_seed,
- 0,
+DEFINE_int(hash_seed, 0,
"Fixed seed to use to hash property keys (0 means random)"
"(with snapshots this option cannot override the baked-in seed)")
+// snapshot-common.cc
+DEFINE_bool(profile_deserialization, false,
+ "Print the time it takes to deserialize the snapshot.")
+
// v8.cc
DEFINE_bool(preemption, false,
"activate a 100ms timer that switches between V8 threads")
@@ -610,6 +615,7 @@ DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
+DEFINE_maybe_bool(testing_maybe_bool_flag, "testing_maybe_bool_flag")
DEFINE_int(testing_int_flag, 13, "testing_int_flag")
DEFINE_float(testing_float_flag, 2.5, "float-flag")
DEFINE_string(testing_string_flag, "Hello, world!", "string-flag")
@@ -626,6 +632,10 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
DEFINE_string(extra_code, NULL, "A filename with extra code to be included in"
" the snapshot (mksnapshot only)")
+// code-stubs-hydrogen.cc
+DEFINE_bool(profile_hydrogen_code_stub_compilation, false,
+ "Print the time it takes to lazily compile hydrogen code stubs.")
+
//
// Dev shell flags
//
@@ -642,7 +652,7 @@ DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
#endif // ENABLE_DEBUGGER_SUPPORT
DEFINE_string(map_counters, "", "Map counters to a file")
-DEFINE_args(js_arguments, JSARGUMENTS_INIT,
+DEFINE_args(js_arguments,
"Pass all remaining arguments to the script. Alias for \"--\".")
#if defined(WEBOS__)
@@ -686,8 +696,10 @@ DEFINE_bool(stress_compaction, false,
#endif
// checks.cc
+#ifdef ENABLE_SLOW_ASSERTS
DEFINE_bool(enable_slow_asserts, false,
"enable asserts that are slow to execute")
+#endif
// codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
DEFINE_bool(print_source, false, "pretty print source code")
@@ -724,8 +736,7 @@ DEFINE_bool(print_interface_details, false, "print interface inference details")
DEFINE_int(print_interface_depth, 5, "depth for printing interfaces")
// objects.cc
-DEFINE_bool(trace_normalization,
- false,
+DEFINE_bool(trace_normalization, false,
"prints when objects are turned into dictionaries.")
// runtime.cc
@@ -739,12 +750,10 @@ DEFINE_bool(collect_heap_spill_statistics, false,
DEFINE_bool(trace_isolates, false, "trace isolate state changes")
// Regexp
-DEFINE_bool(regexp_possessive_quantifier,
- false,
+DEFINE_bool(regexp_possessive_quantifier, false,
"enable possessive quantifier syntax for testing")
DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
-DEFINE_bool(trace_regexp_assembler,
- false,
+DEFINE_bool(trace_regexp_assembler, false,
"trace regexp macro assembler calls.")
//
@@ -773,6 +782,7 @@ DEFINE_bool(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
+DEFINE_bool(logfile_per_isolate, true, "Separate log files for each isolate.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
@@ -795,16 +805,18 @@ DEFINE_implication(log_internal_timer_events, prof)
// elements.cc
DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
+DEFINE_bool(trace_creation_allocation_sites, false,
+ "trace the creation of allocation sites")
+
// code-stubs.cc
DEFINE_bool(print_code_stubs, false, "print code stubs")
-DEFINE_bool(test_secondary_stub_cache,
- false,
+DEFINE_bool(test_secondary_stub_cache, false,
"test secondary stub cache by disabling the primary one")
-DEFINE_bool(test_primary_stub_cache,
- false,
+DEFINE_bool(test_primary_stub_cache, false,
"test primary stub cache by disabling the secondary one")
+
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(print_code, false, "print generated code")
DEFINE_bool(print_opt_code, false, "print optimized code")
@@ -812,8 +824,19 @@ DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
"printing optimized code based on it")
DEFINE_bool(print_code_verbose, false, "print more information for code")
DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
+DEFINE_bool(emit_opt_code_positions, false,
+            "annotate optimized code with source code positions")
#ifdef ENABLE_DISASSEMBLER
+DEFINE_bool(sodium, false, "print generated code output suitable for use with "
+ "the Sodium code viewer")
+
+DEFINE_implication(sodium, print_code_stubs)
+DEFINE_implication(sodium, print_code)
+DEFINE_implication(sodium, print_opt_code)
+DEFINE_implication(sodium, emit_opt_code_positions)
+DEFINE_implication(sodium, code_comments)
+
DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
DEFINE_implication(print_all_code, print_code)
DEFINE_implication(print_all_code, print_opt_code)
@@ -827,6 +850,16 @@ DEFINE_implication(print_all_code, trace_codegen)
#endif
#endif
+//
+// Read-only flags
+//
+#undef FLAG
+#define FLAG FLAG_READONLY
+
+// assembler-arm.h
+DEFINE_bool(enable_ool_constant_pool, false,
+ "enable use of out-of-line constant pools (ARM only)")
+
// Cleanup...
#undef FLAG_FULL
#undef FLAG_READONLY
@@ -834,6 +867,7 @@ DEFINE_implication(print_all_code, trace_codegen)
#undef FLAG_ALIAS
#undef DEFINE_bool
+#undef DEFINE_maybe_bool
#undef DEFINE_int
#undef DEFINE_string
#undef DEFINE_float
@@ -850,3 +884,5 @@ DEFINE_implication(print_all_code, trace_codegen)
#undef FLAG_MODE_DEFINE_DEFAULTS
#undef FLAG_MODE_META
#undef FLAG_MODE_DEFINE_IMPLICATIONS
+
+#undef COMMA
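
flag-definitions.h is an X-macro header: flags.h re-includes it under different FLAG_MODE_* selectors (declare, define, set defaults, enforce implications), which is why the file ends by undefining every helper macro, including the newly added DEFINE_maybe_bool and COMMA. A minimal sketch of that technique, simplified from the real header:

#ifdef FLAG_MODE_DEFINE
// Pass 1: define storage for each flag; implications are a no-op here.
#define DEFINE_bool(nam, def, cmt) bool FLAG_##nam = def;
#define DEFINE_implication(whenflag, thenflag)
#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
// Pass 2: flag definitions are a no-op; each implication becomes a statement.
#define DEFINE_bool(nam, def, cmt)
#define DEFINE_implication(whenflag, thenflag) \
  if (FLAG_##whenflag) FLAG_##thenflag = true;
#endif

Under the second pass, the new DEFINE_implication(concurrent_osr, concurrent_recompilation) above expands to roughly "if (FLAG_concurrent_osr) FLAG_concurrent_recompilation = true;", executed when FlagList::EnforceFlagImplications() re-includes this header (see flags.cc below).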
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 4e18cc8c80..0c36aed332 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -55,7 +55,8 @@ namespace {
// to the actual flag, default value, comment, etc. This is designed to be POD
// initialized as to avoid requiring static constructors.
struct Flag {
- enum FlagType { TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS };
+ enum FlagType { TYPE_BOOL, TYPE_MAYBE_BOOL, TYPE_INT, TYPE_FLOAT,
+ TYPE_STRING, TYPE_ARGS };
FlagType type_; // What type of flag, bool, int, or string.
const char* name_; // Name of the flag, ex "my_flag".
@@ -75,6 +76,11 @@ struct Flag {
return reinterpret_cast<bool*>(valptr_);
}
+ MaybeBoolFlag* maybe_bool_variable() const {
+ ASSERT(type_ == TYPE_MAYBE_BOOL);
+ return reinterpret_cast<MaybeBoolFlag*>(valptr_);
+ }
+
int* int_variable() const {
ASSERT(type_ == TYPE_INT);
return reinterpret_cast<int*>(valptr_);
@@ -133,6 +139,8 @@ struct Flag {
switch (type_) {
case TYPE_BOOL:
return *bool_variable() == bool_default();
+ case TYPE_MAYBE_BOOL:
+ return maybe_bool_variable()->has_value == false;
case TYPE_INT:
return *int_variable() == int_default();
case TYPE_FLOAT:
@@ -145,7 +153,7 @@ struct Flag {
return strcmp(str1, str2) == 0;
}
case TYPE_ARGS:
- return args_variable()->argc() == 0;
+ return args_variable()->argc == 0;
}
UNREACHABLE();
return true;
@@ -157,6 +165,9 @@ struct Flag {
case TYPE_BOOL:
*bool_variable() = bool_default();
break;
+ case TYPE_MAYBE_BOOL:
+ *maybe_bool_variable() = MaybeBoolFlag::Create(false, false);
+ break;
case TYPE_INT:
*int_variable() = int_default();
break;
@@ -186,6 +197,7 @@ const size_t num_flags = sizeof(flags) / sizeof(*flags);
static const char* Type2String(Flag::FlagType type) {
switch (type) {
case Flag::TYPE_BOOL: return "bool";
+ case Flag::TYPE_MAYBE_BOOL: return "maybe_bool";
case Flag::TYPE_INT: return "int";
case Flag::TYPE_FLOAT: return "float";
case Flag::TYPE_STRING: return "string";
@@ -203,6 +215,11 @@ static SmartArrayPointer<const char> ToString(Flag* flag) {
case Flag::TYPE_BOOL:
buffer.Add("%s", (*flag->bool_variable() ? "true" : "false"));
break;
+ case Flag::TYPE_MAYBE_BOOL:
+ buffer.Add("%s", flag->maybe_bool_variable()->has_value
+ ? (flag->maybe_bool_variable()->value ? "true" : "false")
+ : "unset");
+ break;
case Flag::TYPE_INT:
buffer.Add("%d", *flag->int_variable());
break;
@@ -216,9 +233,9 @@ static SmartArrayPointer<const char> ToString(Flag* flag) {
}
case Flag::TYPE_ARGS: {
JSArguments args = *flag->args_variable();
- if (args.argc() > 0) {
+ if (args.argc > 0) {
buffer.Add("%s", args[0]);
- for (int i = 1; i < args.argc(); i++) {
+ for (int i = 1; i < args.argc; i++) {
buffer.Add(" %s", args[i]);
}
}
@@ -260,7 +277,7 @@ List<const char*>* FlagList::argv() {
buffer.Add("--%s", args_flag->name());
args->Add(buffer.ToCString().Detach());
JSArguments jsargs = *args_flag->args_variable();
- for (int j = 0; j < jsargs.argc(); j++) {
+ for (int j = 0; j < jsargs.argc; j++) {
args->Add(StrDup(jsargs[j]));
}
}
@@ -380,6 +397,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
// if we still need a flag value, use the next argument if available
if (flag->type() != Flag::TYPE_BOOL &&
+ flag->type() != Flag::TYPE_MAYBE_BOOL &&
flag->type() != Flag::TYPE_ARGS &&
value == NULL) {
if (i < *argc) {
@@ -399,6 +417,9 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
case Flag::TYPE_BOOL:
*flag->bool_variable() = !is_bool;
break;
+ case Flag::TYPE_MAYBE_BOOL:
+ *flag->maybe_bool_variable() = MaybeBoolFlag::Create(true, !is_bool);
+ break;
case Flag::TYPE_INT:
*flag->int_variable() = strtol(value, &endp, 10); // NOLINT
break;
@@ -425,8 +446,9 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
}
// handle errors
- if ((flag->type() == Flag::TYPE_BOOL && value != NULL) ||
- (flag->type() != Flag::TYPE_BOOL && is_bool) ||
+ bool is_bool_type = flag->type() == Flag::TYPE_BOOL ||
+ flag->type() == Flag::TYPE_MAYBE_BOOL;
+ if ((is_bool_type && value != NULL) || (!is_bool_type && is_bool) ||
*endp != '\0') {
PrintF(stderr, "Error: illegal value for flag %s of type %s\n"
"Try --help for options\n",
@@ -549,6 +571,7 @@ void FlagList::PrintHelp() {
}
+// static
void FlagList::EnforceFlagImplications() {
#define FLAG_MODE_DEFINE_IMPLICATIONS
#include "flag-definitions.h"
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 2bbbd98ac0..d2dbfe2815 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -922,6 +922,13 @@ class StackFrameLocator BASE_EMBEDDED {
};
+// Used to specify the type of prologue to generate.
+enum PrologueFrameMode {
+ BUILD_FUNCTION_FRAME,
+ BUILD_STUB_FRAME
+};
+
+
// Reads all frames on the current stack and copies them into the current
// zone memory.
Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone);
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 91a51731a5..fec9ee565d 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -193,12 +193,16 @@ void BreakableStatementChecker::VisitDebuggerStatement(
}
+void BreakableStatementChecker::VisitCaseClause(CaseClause* clause) {
+}
+
+
void BreakableStatementChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void BreakableStatementChecker::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
+void BreakableStatementChecker::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* expr) {
}
@@ -341,8 +345,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_handler_table(*cgen.handler_table());
#ifdef ENABLE_DEBUGGER_SUPPORT
- code->set_has_debug_break_slots(
- info->isolate()->debugger()->IsDebuggerActive());
code->set_compiled_optimizable(info->IsOptimizable());
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
@@ -826,7 +828,7 @@ void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+ CodeGenerator::RecordPositions(masm_, stmt->position());
} else {
// Check if the statement will be breakable without adding a debug break
// slot.
@@ -836,7 +838,7 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
// breakable. For breakable statements the actual recording of the
// position will be postponed to the breakable code (typically an IC).
bool position_recorded = CodeGenerator::RecordPositions(
- masm_, stmt->statement_pos(), !checker.is_breakable());
+ masm_, stmt->position(), !checker.is_breakable());
// If the position recording did record a new position generate a debug
// break slot to make the statement breakable.
if (position_recorded) {
@@ -844,15 +846,15 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
}
}
#else
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+ CodeGenerator::RecordPositions(masm_, stmt->position());
#endif
}
-void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
+void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, pos);
+ CodeGenerator::RecordPositions(masm_, expr->position());
} else {
// Check if the expression will be breakable without adding a debug break
// slot.
@@ -866,7 +868,7 @@ void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
// statement positions this is used for e.g. the condition expression of
// a do while loop.
bool position_recorded = CodeGenerator::RecordPositions(
- masm_, pos, !checker.is_breakable());
+ masm_, expr->position(), !checker.is_breakable());
// If the position recording did record a new position generate a debug
// break slot to make the statement breakable.
if (position_recorded) {
@@ -1293,7 +1295,7 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
// possible to break on the condition.
__ bind(loop_statement.continue_label());
PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
- SetExpressionPosition(stmt->cond(), stmt->condition_position());
+ SetExpressionPosition(stmt->cond());
VisitForControl(stmt->cond(),
&book_keeping,
loop_statement.break_label(),
@@ -1515,6 +1517,11 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
+void FullCodeGenerator::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
void FullCodeGenerator::VisitConditional(Conditional* expr) {
Comment cmnt(masm_, "[ Conditional");
Label true_case, false_case, done;
@@ -1522,8 +1529,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
__ bind(&true_case);
- SetExpressionPosition(expr->then_expression(),
- expr->then_expression_position());
+ SetExpressionPosition(expr->then_expression());
if (context()->IsTest()) {
const TestContext* for_test = TestContext::cast(context());
VisitForControl(expr->then_expression(),
@@ -1537,8 +1543,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
__ bind(&false_case);
- SetExpressionPosition(expr->else_expression(),
- expr->else_expression_position());
+ SetExpressionPosition(expr->else_expression());
VisitInDuplicateContext(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
if (!context()->IsTest()) {
@@ -1567,10 +1572,33 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- EmitNewClosure(expr->shared_function_info(), false);
+void FullCodeGenerator::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* expr) {
+ Comment cmnt(masm_, "[ NativeFunctionLiteral");
+
+ // Compute the function template for the native function.
+ Handle<String> name = expr->name();
+ v8::Handle<v8::FunctionTemplate> fun_template =
+ expr->extension()->GetNativeFunction(v8::Utils::ToLocal(name));
+ ASSERT(!fun_template.IsEmpty());
+
+ // Instantiate the function and create a shared function info from it.
+ Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
+ const int literals = fun->NumberOfLiterals();
+ Handle<Code> code = Handle<Code>(fun->shared()->code());
+ Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
+ bool is_generator = false;
+ Handle<SharedFunctionInfo> shared =
+ isolate()->factory()->NewSharedFunctionInfo(name, literals, is_generator,
+ code, Handle<ScopeInfo>(fun->shared()->scope_info()));
+ shared->set_construct_stub(*construct_stub);
+
+ // Copy the function data to the shared function info.
+ shared->set_function_data(fun->shared()->function_data());
+ int parameters = fun->shared()->formal_parameter_count();
+ shared->set_formal_parameter_count(parameters);
+
+ EmitNewClosure(shared, false);
}
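
NativeFunctionLiteral replaces SharedFunctionInfoLiteral: rather than the parser pre-building a SharedFunctionInfo, full codegen now asks the declaring extension for a function template at compile time. A hedged sketch of an extension that would feed this path, with hypothetical names and the v8.h API of this era:

class HelloExtension : public v8::Extension {
 public:
  HelloExtension() : v8::Extension("v8/hello", "native function hello();") {}
  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
      v8::Handle<v8::String> name) {
    // Invoked by VisitNativeFunctionLiteral above when compiling "hello".
    return v8::FunctionTemplate::New(HelloCallback);
  }
  static void HelloCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
    info.GetReturnValue().Set(42);
  }
};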
@@ -1615,6 +1643,100 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
}
+void BackEdgeTable::Patch(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+
+ // Iterate over the back edge table and patch every interrupt
+ // call to an unconditional call to the replacement code.
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (static_cast<int>(back_edges.loop_depth(i)) == loop_nesting_level) {
+ ASSERT_EQ(INTERRUPT, GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)));
+ PatchAt(unoptimized, back_edges.pc(i), ON_STACK_REPLACEMENT, patch);
+ }
+ }
+
+ unoptimized->set_back_edges_patched_for_osr(true);
+ ASSERT(Verify(isolate, unoptimized, loop_nesting_level));
+}
+
+
+void BackEdgeTable::Revert(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck);
+
+ // Iterate over the back edge table and revert the patched interrupt calls.
+ ASSERT(unoptimized->back_edges_patched_for_osr());
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (static_cast<int>(back_edges.loop_depth(i)) <= loop_nesting_level) {
+ ASSERT_NE(INTERRUPT, GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)));
+ PatchAt(unoptimized, back_edges.pc(i), INTERRUPT, patch);
+ }
+ }
+
+ unoptimized->set_back_edges_patched_for_osr(false);
+ unoptimized->set_allow_osr_at_loop_nesting_level(0);
+ // Assert that none of the back edges are patched anymore.
+ ASSERT(Verify(isolate, unoptimized, -1));
+}
+
+
+void BackEdgeTable::AddStackCheck(CompilationInfo* info) {
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = info->isolate();
+ Code* code = info->shared_info()->code();
+ Address pc = code->instruction_start() + info->osr_pc_offset();
+ ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate, code, pc));
+ Code* patch = isolate->builtins()->builtin(Builtins::kOsrAfterStackCheck);
+ PatchAt(code, pc, OSR_AFTER_STACK_CHECK, patch);
+}
+
+
+void BackEdgeTable::RemoveStackCheck(CompilationInfo* info) {
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = info->isolate();
+ Code* code = info->shared_info()->code();
+ Address pc = code->instruction_start() + info->osr_pc_offset();
+ if (GetBackEdgeState(isolate, code, pc) == OSR_AFTER_STACK_CHECK) {
+ Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ PatchAt(code, pc, ON_STACK_REPLACEMENT, patch);
+ }
+}
+
+
+#ifdef DEBUG
+bool BackEdgeTable::Verify(Isolate* isolate,
+ Code* unoptimized,
+ int loop_nesting_level) {
+ DisallowHeapAllocation no_gc;
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ uint32_t loop_depth = back_edges.loop_depth(i);
+ CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
+ // Assert that all back edges for shallower loops (and only those)
+ // have already been patched.
+ CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
+ GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)) != INTERRUPT);
+ }
+ return true;
+}
+#endif // DEBUG
+
+
#undef __
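
Taken together, these BackEdgeTable methods implement a small patch-state machine over the interrupt calls at loop back edges: INTERRUPT <-> ON_STACK_REPLACEMENT via Patch()/Revert(), and ON_STACK_REPLACEMENT <-> OSR_AFTER_STACK_CHECK via AddStackCheck()/RemoveStackCheck(). A hedged sketch of a caller; ArmForOsr is hypothetical, the real driver being the runtime profiler:

void ArmForOsr(Isolate* isolate, Code* unoptimized) {
  // Rewrite every interrupt call at the allowed loop depth into a direct
  // call to the on-stack-replacement builtin.
  if (!unoptimized->back_edges_patched_for_osr()) {
    BackEdgeTable::Patch(isolate, unoptimized);
  }
}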
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 5580cb3e86..e27662e0e3 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -139,65 +139,6 @@ class FullCodeGenerator: public AstVisitor {
#error Unsupported target architecture.
#endif
- class BackEdgeTableIterator {
- public:
- explicit BackEdgeTableIterator(Code* unoptimized,
- DisallowHeapAllocation* required) {
- ASSERT(unoptimized->kind() == Code::FUNCTION);
- instruction_start_ = unoptimized->instruction_start();
- cursor_ = instruction_start_ + unoptimized->back_edge_table_offset();
- ASSERT(cursor_ < instruction_start_ + unoptimized->instruction_size());
- table_length_ = Memory::uint32_at(cursor_);
- cursor_ += kTableLengthSize;
- end_ = cursor_ + table_length_ * kEntrySize;
- }
-
- bool Done() { return cursor_ >= end_; }
-
- void Next() {
- ASSERT(!Done());
- cursor_ += kEntrySize;
- }
-
- BailoutId ast_id() {
- ASSERT(!Done());
- return BailoutId(static_cast<int>(
- Memory::uint32_at(cursor_ + kAstIdOffset)));
- }
-
- uint32_t loop_depth() {
- ASSERT(!Done());
- return Memory::uint32_at(cursor_ + kLoopDepthOffset);
- }
-
- uint32_t pc_offset() {
- ASSERT(!Done());
- return Memory::uint32_at(cursor_ + kPcOffsetOffset);
- }
-
- Address pc() {
- ASSERT(!Done());
- return instruction_start_ + pc_offset();
- }
-
- uint32_t table_length() { return table_length_; }
-
- private:
- static const int kTableLengthSize = kIntSize;
- static const int kAstIdOffset = 0 * kIntSize;
- static const int kPcOffsetOffset = 1 * kIntSize;
- static const int kLoopDepthOffset = 2 * kIntSize;
- static const int kEntrySize = 3 * kIntSize;
-
- Address cursor_;
- Address end_;
- Address instruction_start_;
- uint32_t table_length_;
-
- DISALLOW_COPY_AND_ASSIGN(BackEdgeTableIterator);
- };
-
-
private:
class Breakable;
class Iteration;
@@ -635,7 +576,7 @@ class FullCodeGenerator: public AstVisitor {
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
- void SetExpressionPosition(Expression* expr, int pos);
+ void SetExpressionPosition(Expression* expr);
void SetStatementPosition(int pos);
void SetSourcePosition(int pos);
@@ -940,6 +881,93 @@ class AccessorTable: public TemplateHashMap<Literal,
};
+class BackEdgeTable {
+ public:
+ BackEdgeTable(Code* code, DisallowHeapAllocation* required) {
+ ASSERT(code->kind() == Code::FUNCTION);
+ instruction_start_ = code->instruction_start();
+ Address table_address = instruction_start_ + code->back_edge_table_offset();
+ length_ = Memory::uint32_at(table_address);
+ start_ = table_address + kTableLengthSize;
+ }
+
+ uint32_t length() { return length_; }
+
+ BailoutId ast_id(uint32_t index) {
+ return BailoutId(static_cast<int>(
+ Memory::uint32_at(entry_at(index) + kAstIdOffset)));
+ }
+
+ uint32_t loop_depth(uint32_t index) {
+ return Memory::uint32_at(entry_at(index) + kLoopDepthOffset);
+ }
+
+ uint32_t pc_offset(uint32_t index) {
+ return Memory::uint32_at(entry_at(index) + kPcOffsetOffset);
+ }
+
+ Address pc(uint32_t index) {
+ return instruction_start_ + pc_offset(index);
+ }
+
+ enum BackEdgeState {
+ INTERRUPT,
+ ON_STACK_REPLACEMENT,
+ OSR_AFTER_STACK_CHECK
+ };
+
+ // Patch all interrupts with allowed loop depth in the unoptimized code to
+ // unconditionally call replacement_code.
+ static void Patch(Isolate* isolate,
+ Code* unoptimized_code);
+
+ // Patch the back edge to the target state, provided the correct callee.
+ static void PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code);
+
+ // Change all patched back edges back to normal interrupts.
+ static void Revert(Isolate* isolate,
+ Code* unoptimized_code);
+
+ // Change a back edge patched for on-stack replacement to perform a
+ // stack check first.
+ static void AddStackCheck(CompilationInfo* info);
+
+  // Remove the stack check, if present, and replace it with on-stack
+  // replacement.
+ static void RemoveStackCheck(CompilationInfo* info);
+
+ // Return the current patch state of the back edge.
+ static BackEdgeState GetBackEdgeState(Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after);
+
+#ifdef DEBUG
+ // Verify that all back edges of a certain loop depth are patched.
+ static bool Verify(Isolate* isolate,
+ Code* unoptimized_code,
+ int loop_nesting_level);
+#endif // DEBUG
+
+ private:
+ Address entry_at(uint32_t index) {
+ ASSERT(index < length_);
+ return start_ + index * kEntrySize;
+ }
+
+ static const int kTableLengthSize = kIntSize;
+ static const int kAstIdOffset = 0 * kIntSize;
+ static const int kPcOffsetOffset = 1 * kIntSize;
+ static const int kLoopDepthOffset = 2 * kIntSize;
+ static const int kEntrySize = 3 * kIntSize;
+
+ Address start_;
+ Address instruction_start_;
+ uint32_t length_;
+};
+
+
} } // namespace v8::internal
#endif // V8_FULL_CODEGEN_H_
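
The table layout the new class reads is implied by its offset constants: a uint32 length word followed by fixed-size 12-byte records, so entry_at(i) is start_ + i * kEntrySize. Schematically:

[length : uint32]
entry 0: [ast_id : uint32][pc_offset : uint32][loop_depth : uint32]
entry 1: [ast_id : uint32][pc_offset : uint32][loop_depth : uint32]
...

This is the same layout the removed BackEdgeTableIterator walked with a cursor; random access by index is what lets Patch() and Revert() in full-codegen.cc iterate it with a plain counted loop.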
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 1a98e49ff3..2ebe1c0088 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -79,7 +79,7 @@ class GlobalHandles::Node {
Internals::kNodeIsPartiallyDependentShift);
}
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
~Node() {
// TODO(1428): if it's a weak handle we should have invoked its callback.
// Zap the values for eager trapping.
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 1977e68c82..3456030b7e 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -232,6 +232,8 @@ const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
const int kCharSize = sizeof(char); // NOLINT
const int kShortSize = sizeof(short); // NOLINT
const int kIntSize = sizeof(int); // NOLINT
+const int kInt32Size = sizeof(int32_t); // NOLINT
+const int kInt64Size = sizeof(int64_t); // NOLINT
const int kDoubleSize = sizeof(double); // NOLINT
const int kIntptrSize = sizeof(intptr_t); // NOLINT
const int kPointerSize = sizeof(void*); // NOLINT
@@ -248,10 +250,12 @@ const int kRandomStateSize = 2 * kIntSize;
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
+const bool kIs64BitArch = true;
#else
const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
+const bool kIs64BitArch = false;
#endif
const int kBitsPerByte = 8;
@@ -354,7 +358,7 @@ F FUNCTION_CAST(Address addr) {
// Define DISABLE_ASAN macros.
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
-#define DISABLE_ASAN __attribute__((no_address_safety_analysis))
+#define DISABLE_ASAN __attribute__((no_sanitize_address))
#endif
#endif
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 5b879d8f08..ec69c3fdbe 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -130,16 +130,17 @@ void HandleScope::CloseScope(Isolate* isolate,
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
- current->next = prev_next;
+ std::swap(current->next, prev_next);
current->level--;
if (current->limit != prev_limit) {
current->limit = prev_limit;
DeleteExtensions(isolate);
- }
-
-#ifdef ENABLE_EXTRA_CHECKS
- ZapRange(prev_next, prev_limit);
+#ifdef ENABLE_HANDLE_ZAPPING
+ ZapRange(current->next, prev_limit);
+ } else {
+ ZapRange(current->next, prev_next);
#endif
+ }
}
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index b3704df698..4cb1827d8e 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -101,7 +101,7 @@ void HandleScope::DeleteExtensions(Isolate* isolate) {
}
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
void HandleScope::ZapRange(Object** start, Object** end) {
ASSERT(end - start <= kHandleBlockSize);
for (Object** p = start; p != end; p++) {
@@ -150,54 +150,6 @@ Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
}
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
- // If objects constructed from this function exist then changing
- // 'estimated_nof_properties' is dangerous since the previous value might
- // have been compiled into the fast construct stub. More over, the inobject
- // slack tracking logic might have adjusted the previous value, so even
- // passing the same value is risky.
- if (func->shared()->live_objects_may_exist()) return;
-
- func->shared()->set_expected_nof_properties(nof);
- if (func->has_initial_map()) {
- Handle<Map> new_initial_map =
- func->GetIsolate()->factory()->CopyMap(
- Handle<Map>(func->initial_map()));
- new_initial_map->set_unused_property_fields(nof);
- func->set_initial_map(*new_initial_map);
- }
-}
-
-
-static int ExpectedNofPropertiesFromEstimate(int estimate) {
- // If no properties are added in the constructor, they are more likely
- // to be added later.
- if (estimate == 0) estimate = 2;
-
- // We do not shrink objects that go into a snapshot (yet), so we adjust
- // the estimate conservatively.
- if (Serializer::enabled()) return estimate + 2;
-
- // Inobject slack tracking will reclaim redundant inobject space later,
- // so we can afford to adjust the estimate generously.
- if (FLAG_clever_optimizations) {
- return estimate + 8;
- } else {
- return estimate + 3;
- }
-}
-
-
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
- int estimate) {
- // See the comment in SetExpectedNofProperties.
- if (shared->live_objects_may_exist()) return;
-
- shared->set_expected_nof_properties(
- ExpectedNofPropertiesFromEstimate(estimate));
-}
-
-
void FlattenString(Handle<String> string) {
CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
}
@@ -285,30 +237,6 @@ Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
}
-Handle<String> SubString(Handle<String> str,
- int start,
- int end,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(str->GetIsolate(),
- str->SubString(start, end, pretenure), String);
-}
-
-
-Handle<JSObject> Copy(Handle<JSObject> obj) {
- Isolate* isolate = obj->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- isolate->heap()->CopyJSObject(*obj), JSObject);
-}
-
-
-Handle<JSObject> DeepCopy(Handle<JSObject> obj) {
- Isolate* isolate = obj->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- obj->DeepCopy(isolate),
- JSObject);
-}
-
-
// Wrappers for scripts are kept alive and cached in weak global
// handles referred from foreign objects held by the scripts as long as
// they are used. When they are not used anymore, the garbage
@@ -905,4 +833,15 @@ DeferredHandles* DeferredHandleScope::Detach() {
}
+void AddWeakObjectToCodeDependency(Heap* heap,
+ Handle<Object> object,
+ Handle<Code> code) {
+ heap->EnsureWeakObjectToCodeTable();
+ Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(*object));
+ dep = DependentCode::Insert(dep, DependentCode::kWeaklyEmbeddedGroup, code);
+ CALL_HEAP_FUNCTION_VOID(heap->isolate(),
+ heap->AddWeakObjectToCodeDependency(*object, *dep));
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index c3e4dca1a6..cfdecac190 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -177,7 +177,7 @@ class HandleScope {
// Extend the handle scope making room for more handles.
static internal::Object** Extend(Isolate* isolate);
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(Object** start, Object** end);
#endif
@@ -255,10 +255,6 @@ Handle<Object> GetProperty(Isolate* isolate,
Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
uint32_t index);
-Handle<JSObject> Copy(Handle<JSObject> obj);
-
-Handle<JSObject> DeepCopy(Handle<JSObject> obj);
-
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
Handle<JSArray> array);
@@ -303,19 +299,6 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
Handle<FixedArray> second);
-Handle<String> SubString(Handle<String> str,
- int start,
- int end,
- PretenureFlag pretenure = NOT_TENURED);
-
-// Sets the expected number of properties for the function's instances.
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
-
-// Sets the expected number of properties based on estimate from compiler.
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
- int estimate);
-
-
Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
Handle<JSFunction> constructor,
Handle<JSGlobalProxy> global);
@@ -330,6 +313,9 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<Object> key,
Handle<Object> value);
+void AddWeakObjectToCodeDependency(Heap* heap,
+ Handle<Object> object,
+ Handle<Code> code);
// Seal off the current HandleScope so that new handles can only be created
// if a new HandleScope is entered.
diff --git a/deps/v8/src/harmony-math.js b/deps/v8/src/harmony-math.js
new file mode 100644
index 0000000000..a4d3f2e8a5
--- /dev/null
+++ b/deps/v8/src/harmony-math.js
@@ -0,0 +1,60 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'use strict';
+
+// ES6 draft 09-27-13, section 20.2.2.28.
+function MathSign(x) {
+ x = TO_NUMBER_INLINE(x);
+ if (x > 0) return 1;
+ if (x < 0) return -1;
+ if (x === 0) return x;
+ return NAN;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.34.
+function MathTrunc(x) {
+ x = TO_NUMBER_INLINE(x);
+ if (x > 0) return MathFloor(x);
+ if (x < 0) return MathCeil(x);
+ if (x === 0) return x;
+ return NAN;
+}
+
+
+function ExtendMath() {
+ %CheckIsBootstrapping();
+
+ // Set up the non-enumerable functions on the Math object.
+ InstallFunctions($Math, DONT_ENUM, $Array(
+ "sign", MathSign,
+ "trunc", MathTrunc
+ ));
+}
+
+ExtendMath();
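
Observable behavior of the two new functions once installed (this file ships behind a harmony flag in this V8 version):

Math.sign(-3);     // -1
Math.sign(0);      // 0, and Math.sign(-0) is -0, since x === 0 returns x
Math.sign(NaN);    // NaN
Math.trunc(4.7);   // 4  (floor of a positive value)
Math.trunc(-4.7);  // -4 (ceil of a negative value)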
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 4f1960386a..ad6f44f935 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -140,12 +140,11 @@ MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
// Compute map and object size.
Map* map = ascii_internalized_string_map();
int size = SeqOneByteString::SizeFor(str.length());
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
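
SelectSpace replaces the inline large-object check that both hunks here delete; for this TENURED path it behaves roughly like the following simplification (a sketch, not the real helper's full signature):

// Oversized objects must go to the large-object space; everything else
// goes to the requested tenured space.
AllocationSpace SelectSpaceSketch(int size, AllocationSpace preferred) {
  return size > Page::kMaxNonCodeHeapObjectSize ? LO_SPACE : preferred;
}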
@@ -174,12 +173,11 @@ MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
// Compute map and object size.
Map* map = internalized_string_map();
int size = SeqTwoByteString::SizeFor(str.length());
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -208,10 +206,17 @@ MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
}
+MaybeObject* Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
+ return CopyConstantPoolArrayWithMap(src, src->map());
+}
+
+
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
AllocationSpace space,
AllocationSpace retry_space) {
- ASSERT(AllowHandleAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
+ ASSERT(AllowHandleAllocation::IsAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
+ ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(space != NEW_SPACE ||
retry_space == OLD_POINTER_SPACE ||
retry_space == OLD_DATA_SPACE ||
@@ -291,40 +296,6 @@ void Heap::FinalizeExternalString(String* string) {
}
-MaybeObject* Heap::AllocateRawMap() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result = map_space_->AllocateRaw(Map::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawCell() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result = cell_space_->AllocateRaw(Cell::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawPropertyCell() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result =
- property_cell_space_->AllocateRaw(PropertyCell::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
bool Heap::InNewSpace(Object* object) {
bool result = new_space_.Contains(object);
ASSERT(!result || // Either not in new space
@@ -525,6 +496,13 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
+ if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
+ if (AllocationMemento::FindForJSObject(JSObject::cast(object), true) !=
+ NULL) {
+ object->GetIsolate()->heap()->allocation_mementos_found_++;
+ }
+ }
+
// AllocationMementos are unrooted and shouldn't survive a scavenge
ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
// Call the slow part of scavenge object.
@@ -532,14 +510,6 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
}
-MaybeObject* Heap::AllocateEmptyJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site) {
- return AllocateJSArrayAndStorageWithAllocationSite(elements_kind, 0, 0,
- allocation_site, DONT_INITIALIZE_ARRAY_ELEMENTS);
-}
-
-
bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
const char* collector_reason = NULL;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
@@ -847,15 +817,15 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
#ifdef VERIFY_HEAP
-NoWeakEmbeddedMapsVerificationScope::NoWeakEmbeddedMapsVerificationScope() {
+NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() {
Isolate* isolate = Isolate::Current();
- isolate->heap()->no_weak_embedded_maps_verification_scope_depth_++;
+ isolate->heap()->no_weak_object_verification_scope_depth_++;
}
-NoWeakEmbeddedMapsVerificationScope::~NoWeakEmbeddedMapsVerificationScope() {
+NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
Isolate* isolate = Isolate::Current();
- isolate->heap()->no_weak_embedded_maps_verification_scope_depth_--;
+ isolate->heap()->no_weak_object_verification_scope_depth_--;
}
#endif
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index e66af3364d..6b159a98a3 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -27,6 +27,7 @@
#include "v8.h"
+#include "deoptimizer.h"
#include "heap-profiler.h"
#include "heap-snapshot-generator-inl.h"
@@ -35,7 +36,8 @@ namespace internal {
HeapProfiler::HeapProfiler(Heap* heap)
: snapshots_(new HeapSnapshotsCollection(heap)),
- next_snapshot_uid_(1) {
+ next_snapshot_uid_(1),
+ is_tracking_allocations_(false) {
}
@@ -132,14 +134,86 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
}
-void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
- snapshots_->ObjectMoveEvent(from, to);
+void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
+ snapshots_->ObjectMoveEvent(from, to, size);
}
+
+void HeapProfiler::NewObjectEvent(Address addr, int size) {
+ snapshots_->NewObjectEvent(addr, size);
+}
+
+
+void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
+ snapshots_->UpdateObjectSizeEvent(addr, size);
+}
+
+
void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
RetainedObjectInfo* info) {
// TODO(yurus, marja): Don't route this information through GlobalHandles.
heap()->isolate()->global_handles()->SetRetainedObjectInfo(id, info);
}
+
+void HeapProfiler::StartHeapAllocationsRecording() {
+ StartHeapObjectsTracking();
+ is_tracking_allocations_ = true;
+ DropCompiledCode();
+ snapshots_->UpdateHeapObjectsMap();
+}
+
+
+void HeapProfiler::StopHeapAllocationsRecording() {
+ StopHeapObjectsTracking();
+ is_tracking_allocations_ = false;
+ DropCompiledCode();
+}
+
+
+void HeapProfiler::RecordObjectAllocationFromMasm(Isolate* isolate,
+ Address obj,
+ int size) {
+ isolate->heap_profiler()->NewObjectEvent(obj, size);
+}
+
+
+void HeapProfiler::DropCompiledCode() {
+ Isolate* isolate = heap()->isolate();
+ HandleScope scope(isolate);
+
+ if (FLAG_concurrent_recompilation) {
+ isolate->optimizing_compiler_thread()->Flush();
+ }
+
+ Deoptimizer::DeoptimizeAll(isolate);
+
+ Handle<Code> lazy_compile =
+ Handle<Code>(isolate->builtins()->builtin(Builtins::kLazyCompile));
+
+ heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "switch allocations tracking");
+
+ DisallowHeapAllocation no_allocation;
+
+ HeapIterator iterator(heap());
+ HeapObject* obj = NULL;
+ while (((obj = iterator.next()) != NULL)) {
+ if (obj->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(obj);
+ SharedFunctionInfo* shared = function->shared();
+
+ if (!shared->allows_lazy_compilation()) continue;
+ if (!shared->script()->IsScript()) continue;
+
+ Code::Kind kind = function->code()->kind();
+ if (kind == Code::FUNCTION || kind == Code::BUILTIN) {
+ function->set_code(*lazy_compile);
+ shared->set_code(*lazy_compile);
+ }
+ }
+ }
+}
+
+
} } // namespace v8::internal
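
A hedged sketch of the intended call sequence for the new allocation tracking; embedders reach these through v8::HeapProfiler, so the direct calls are illustrative only:

HeapProfiler* profiler = isolate->heap_profiler();
// Deoptimizes everything and swaps compiled code for lazy-compile stubs so
// subsequent allocations flow through tracked runtime paths.
profiler->StartHeapAllocationsRecording();
// ... run the workload; each allocation reports NewObjectEvent(addr, size) ...
profiler->StopHeapAllocationsRecording();  // drops code again, stops tracking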
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index 5ae60fa923..74002278d4 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -37,14 +37,6 @@ namespace internal {
class HeapSnapshot;
class HeapSnapshotsCollection;
-#define HEAP_PROFILE(heap, call) \
- do { \
- v8::internal::HeapProfiler* profiler = heap->isolate()->heap_profiler(); \
- if (profiler != NULL && profiler->is_profiling()) { \
- profiler->call; \
- } \
- } while (false)
-
class HeapProfiler {
public:
explicit HeapProfiler(Heap* heap);
@@ -63,13 +55,22 @@ class HeapProfiler {
void StartHeapObjectsTracking();
void StopHeapObjectsTracking();
+
+ static void RecordObjectAllocationFromMasm(Isolate* isolate,
+ Address obj,
+ int size);
+
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
int GetSnapshotsCount();
HeapSnapshot* GetSnapshot(int index);
SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
void DeleteAllSnapshots();
- void ObjectMoveEvent(Address from, Address to);
+ void ObjectMoveEvent(Address from, Address to, int size);
+
+ void NewObjectEvent(Address addr, int size);
+
+ void UpdateObjectSizeEvent(Address addr, int size);
void DefineWrapperClass(
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
@@ -82,12 +83,26 @@ class HeapProfiler {
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
+ bool is_tracking_allocations() {
+ return is_tracking_allocations_;
+ }
+
+ void StartHeapAllocationsRecording();
+ void StopHeapAllocationsRecording();
+
+ int FindUntrackedObjects() {
+ return snapshots_->FindUntrackedObjects();
+ }
+
+ void DropCompiledCode();
+
private:
Heap* heap() const { return snapshots_->heap(); }
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
+ bool is_tracking_allocations_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index bd47eec63b..10d113c3d1 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -29,6 +29,7 @@
#include "heap-snapshot-generator-inl.h"
+#include "allocation-tracker.h"
#include "heap-profiler.h"
#include "debug.h"
#include "types.h"
@@ -397,7 +398,7 @@ void HeapObjectsMap::SnapshotGenerationFinished() {
}
-void HeapObjectsMap::MoveObject(Address from, Address to) {
+void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
ASSERT(to != NULL);
ASSERT(from != NULL);
if (from == to) return;
@@ -428,11 +429,39 @@ void HeapObjectsMap::MoveObject(Address from, Address to) {
int from_entry_info_index =
static_cast<int>(reinterpret_cast<intptr_t>(from_value));
entries_.at(from_entry_info_index).addr = to;
+ // Size of an object can change during its life, so to keep information
+ // about the object in entries_ consistent, we have to adjust size when the
+ // object is migrated.
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Move object from %p to %p old size %6d new size %6d\n",
+ from,
+ to,
+ entries_.at(from_entry_info_index).size,
+ object_size);
+ }
+ entries_.at(from_entry_info_index).size = object_size;
to_entry->value = from_value;
}
}
+void HeapObjectsMap::NewObject(Address addr, int size) {
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("New object : %p %6d. Next address is %p\n",
+ addr,
+ size,
+ addr + size);
+ }
+ ASSERT(addr != NULL);
+ FindOrAddEntry(addr, size, false);
+}
+
+
+void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
+ FindOrAddEntry(addr, size, false);
+}
+
+
SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
false);
@@ -445,7 +474,8 @@ SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
- unsigned int size) {
+ unsigned int size,
+ bool accessed) {
ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
true);
@@ -453,14 +483,20 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
int entry_index =
static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
EntryInfo& entry_info = entries_.at(entry_index);
- entry_info.accessed = true;
+ entry_info.accessed = accessed;
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Update object size : %p with old size %d and new size %d\n",
+ addr,
+ entry_info.size,
+ size);
+ }
entry_info.size = size;
return entry_info.id;
}
entry->value = reinterpret_cast<void*>(entries_.length());
SnapshotObjectId id = next_id_;
next_id_ += kObjectIdStep;
- entries_.Add(EntryInfo(id, addr, size));
+ entries_.Add(EntryInfo(id, addr, size, accessed));
ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
return id;
}
@@ -472,6 +508,10 @@ void HeapObjectsMap::StopHeapObjectsTracking() {
void HeapObjectsMap::UpdateHeapObjectsMap() {
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Begin HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
+ entries_map_.occupancy());
+ }
heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"HeapSnapshotsCollection::UpdateHeapObjectsMap");
HeapIterator iterator(heap_);
@@ -479,8 +519,129 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
obj != NULL;
obj = iterator.next()) {
FindOrAddEntry(obj->address(), obj->Size());
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Update object : %p %6d. Next address is %p\n",
+ obj->address(),
+ obj->Size(),
+ obj->address() + obj->Size());
+ }
}
RemoveDeadEntries();
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("End HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
+ entries_map_.occupancy());
+ }
+}
+
+
+namespace {
+
+
+struct HeapObjectInfo {
+ HeapObjectInfo(HeapObject* obj, int expected_size)
+ : obj(obj),
+ expected_size(expected_size) {
+ }
+
+ HeapObject* obj;
+ int expected_size;
+
+ bool IsValid() const { return expected_size == obj->Size(); }
+
+ void Print() const {
+ if (expected_size == 0) {
+ PrintF("Untracked object : %p %6d. Next address is %p\n",
+ obj->address(),
+ obj->Size(),
+ obj->address() + obj->Size());
+ } else if (obj->Size() != expected_size) {
+ PrintF("Wrong size %6d: %p %6d. Next address is %p\n",
+ expected_size,
+ obj->address(),
+ obj->Size(),
+ obj->address() + obj->Size());
+ } else {
+ PrintF("Good object : %p %6d. Next address is %p\n",
+ obj->address(),
+ expected_size,
+ obj->address() + obj->Size());
+ }
+ }
+};
+
+
+static int comparator(const HeapObjectInfo* a, const HeapObjectInfo* b) {
+ if (a->obj < b->obj) return -1;
+ if (a->obj > b->obj) return 1;
+ return 0;
+}
+
+
+} // namespace
+
+
+int HeapObjectsMap::FindUntrackedObjects() {
+ List<HeapObjectInfo> heap_objects(1000);
+
+ HeapIterator iterator(heap_);
+ int untracked = 0;
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next()) {
+ HashMap::Entry* entry = entries_map_.Lookup(
+ obj->address(), ComputePointerHash(obj->address()), false);
+ if (entry == NULL) {
+ ++untracked;
+ if (FLAG_heap_profiler_trace_objects) {
+ heap_objects.Add(HeapObjectInfo(obj, 0));
+ }
+ } else {
+ int entry_index = static_cast<int>(
+ reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_.at(entry_index);
+ if (FLAG_heap_profiler_trace_objects) {
+ heap_objects.Add(HeapObjectInfo(obj,
+ static_cast<int>(entry_info.size)));
+ if (obj->Size() != static_cast<int>(entry_info.size))
+ ++untracked;
+ } else {
+ CHECK_EQ(obj->Size(), static_cast<int>(entry_info.size));
+ }
+ }
+ }
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("\nBegin HeapObjectsMap::FindUntrackedObjects. %d entries in map.\n",
+ entries_map_.occupancy());
+ heap_objects.Sort(comparator);
+ int last_printed_object = -1;
+ bool print_next_object = false;
+ for (int i = 0; i < heap_objects.length(); ++i) {
+ const HeapObjectInfo& object_info = heap_objects[i];
+ if (!object_info.IsValid()) {
+ ++untracked;
+ if (last_printed_object != i - 1) {
+ if (i > 0) {
+ PrintF("%d objects were skipped\n", i - 1 - last_printed_object);
+ heap_objects[i - 1].Print();
+ }
+ }
+ object_info.Print();
+ last_printed_object = i;
+ print_next_object = true;
+ } else if (print_next_object) {
+ object_info.Print();
+ print_next_object = false;
+ last_printed_object = i;
+ }
+ }
+ if (last_printed_object < heap_objects.length() - 1) {
+ PrintF("Last %d objects were skipped\n",
+ heap_objects.length() - 1 - last_printed_object);
+ }
+ PrintF("End HeapObjectsMap::FindUntrackedObjects. %d entries in map.\n\n",
+ entries_map_.occupancy());
+ }
+ return untracked;
}
@@ -587,7 +748,8 @@ size_t HeapObjectsMap::GetUsedMemorySize() const {
HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
: is_tracking_objects_(false),
names_(heap),
- ids_(heap) {
+ ids_(heap),
+ allocation_tracker_(NULL) {
}
@@ -597,10 +759,29 @@ static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
HeapSnapshotsCollection::~HeapSnapshotsCollection() {
+ delete allocation_tracker_;
snapshots_.Iterate(DeleteHeapSnapshot);
}
+void HeapSnapshotsCollection::StartHeapObjectsTracking() {
+ ids_.UpdateHeapObjectsMap();
+ if (allocation_tracker_ == NULL) {
+ allocation_tracker_ = new AllocationTracker(&ids_, names());
+ }
+ is_tracking_objects_ = true;
+}
+
+
+void HeapSnapshotsCollection::StopHeapObjectsTracking() {
+ ids_.StopHeapObjectsTracking();
+ if (allocation_tracker_ != NULL) {
+ delete allocation_tracker_;
+ allocation_tracker_ = NULL;
+ }
+}
+
+
HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
unsigned uid) {
is_tracking_objects_ = true; // Start watching for heap objects moves.
@@ -644,6 +825,15 @@ Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
}
+void HeapSnapshotsCollection::NewObjectEvent(Address addr, int size) {
+ DisallowHeapAllocation no_allocation;
+ ids_.NewObject(addr, size);
+ if (allocation_tracker_ != NULL) {
+ allocation_tracker_->NewObjectEvent(addr, size);
+ }
+}
+
+
size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
size_t size = sizeof(*this);
size += names_.GetUsedMemorySize();
@@ -1301,6 +1491,10 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
AllocationSite* site) {
SetInternalReference(site, entry, "transition_info", site->transition_info(),
AllocationSite::kTransitionInfoOffset);
+ SetInternalReference(site, entry, "nested_site", site->nested_site(),
+ AllocationSite::kNestedSiteOffset);
+ SetInternalReference(site, entry, "dependent_code", site->dependent_code(),
+ AllocationSite::kDependentCodeOffset);
}
@@ -2438,6 +2632,10 @@ const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
+ if (AllocationTracker* allocation_tracker =
+ snapshot_->collection()->allocation_tracker()) {
+ allocation_tracker->PrepareForSerialization();
+ }
ASSERT(writer_ == NULL);
writer_ = new OutputStreamWriter(stream);
SerializeImpl();
@@ -2461,6 +2659,16 @@ void HeapSnapshotJSONSerializer::SerializeImpl() {
SerializeEdges();
if (writer_->aborted()) return;
writer_->AddString("],\n");
+
+ writer_->AddString("\"trace_function_infos\":[");
+ SerializeTraceNodeInfos();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+ writer_->AddString("\"trace_tree\":[");
+ SerializeTraceTree();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+
writer_->AddString("\"strings\":[");
SerializeStrings();
if (writer_->aborted()) return;
@@ -2472,7 +2680,7 @@ void HeapSnapshotJSONSerializer::SerializeImpl() {
int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
HashMap::Entry* cache_entry = strings_.Lookup(
- const_cast<char*>(s), ObjectHash(s), true);
+ const_cast<char*>(s), StringHash(s), true);
if (cache_entry->value == NULL) {
cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
}
@@ -2621,7 +2829,20 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("shortcut") ","
JSON_S("weak")) ","
JSON_S("string_or_number") ","
- JSON_S("node"))));
+ JSON_S("node")) ","
+ JSON_S("trace_function_info_fields") ":" JSON_A(
+ JSON_S("function_id") ","
+ JSON_S("name") ","
+ JSON_S("script_name") ","
+ JSON_S("script_id") ","
+ JSON_S("line") ","
+ JSON_S("column")) ","
+ JSON_S("trace_node_fields") ":" JSON_A(
+ JSON_S("id") ","
+ JSON_S("function_id") ","
+ JSON_S("count") ","
+ JSON_S("size") ","
+ JSON_S("children"))));
#undef JSON_S
#undef JSON_O
#undef JSON_A
@@ -2629,6 +2850,13 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
writer_->AddNumber(snapshot_->entries().length());
writer_->AddString(",\"edge_count\":");
writer_->AddNumber(snapshot_->edges().length());
+ writer_->AddString(",\"trace_function_count\":");
+ uint32_t count = 0;
+ AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ if (tracker) {
+ count = tracker->id_to_function_info()->occupancy();
+ }
+ writer_->AddNumber(count);
}
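The two new meta arrays describe the row layout of the trace data serialized above, and trace_function_count lets readers pre-size their tables before parsing. Illustrative fragment of the resulting snapshot header (counts invented):

    // "meta": { ...,
    //   "trace_function_info_fields":
    //     ["function_id","name","script_name","script_id","line","column"],
    //   "trace_node_fields": ["id","function_id","count","size","children"] },
    // "node_count": 4711, "edge_count": 9042, "trace_function_count": 17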
@@ -2642,6 +2870,100 @@ static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
}
+void HeapSnapshotJSONSerializer::SerializeTraceTree() {
+ AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ if (!tracker) return;
+ AllocationTraceTree* traces = tracker->trace_tree();
+ SerializeTraceNode(traces->root());
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeTraceNode(AllocationTraceNode* node) {
+ // The buffer needs space for 4 unsigned ints, 4 commas, [ and \0
+ const int kBufferSize =
+ 4 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ + 4 + 1 + 1;
+ EmbeddedVector<char, kBufferSize> buffer;
+ int buffer_pos = 0;
+ buffer_pos = utoa(node->id(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(node->function_id(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(node->allocation_count(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(node->allocation_size(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer[buffer_pos++] = '[';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+
+ Vector<AllocationTraceNode*> children = node->children();
+ for (int i = 0; i < children.length(); i++) {
+ if (i > 0) {
+ writer_->AddCharacter(',');
+ }
+ SerializeTraceNode(children[i]);
+ }
+ writer_->AddCharacter(']');
+}
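Each trace node is emitted as four unsigned ints followed by a bracketed, recursively serialized child list, matching the trace_node_fields meta entry. Example output for one node with two leaf children (numbers invented):

    // 17,3,25,4096,[18,3,20,2048,[],19,4,5,2048,[]]
    //  |  |  |    | \__ children: two nodes, each with an empty child list
    //  |  |  |    allocation_size
    //  |  |  allocation_count
    //  |  function_id
    //  id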
+
+
+// 0-based position is converted to 1-based during the serialization.
+static int SerializePosition(int position, const Vector<char>& buffer,
+ int buffer_pos) {
+ if (position == -1) {
+ buffer[buffer_pos++] = '0';
+ } else {
+ ASSERT(position >= 0);
+ buffer_pos = utoa(static_cast<unsigned>(position + 1), buffer, buffer_pos);
+ }
+ return buffer_pos;
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
+ AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ if (!tracker) return;
+ // The buffer needs space for 6 unsigned ints, 6 commas, \n and \0
+ const int kBufferSize =
+ 6 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ + 6 + 1 + 1;
+ EmbeddedVector<char, kBufferSize> buffer;
+ HashMap* id_to_function_info = tracker->id_to_function_info();
+ bool first_entry = true;
+ for (HashMap::Entry* p = id_to_function_info->Start();
+ p != NULL;
+ p = id_to_function_info->Next(p)) {
+ SnapshotObjectId id =
+ static_cast<SnapshotObjectId>(reinterpret_cast<intptr_t>(p->key));
+ AllocationTracker::FunctionInfo* info =
+ reinterpret_cast<AllocationTracker::FunctionInfo*>(p->value);
+ int buffer_pos = 0;
+ if (first_entry) {
+ first_entry = false;
+ } else {
+ buffer[buffer_pos++] = ',';
+ }
+ buffer_pos = utoa(id, buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(GetStringId(info->name), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(GetStringId(info->script_name), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ // The cast is safe because script id is a non-negative Smi.
+ buffer_pos = utoa(static_cast<unsigned>(info->script_id), buffer,
+ buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = SerializePosition(info->line, buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = SerializePosition(info->column, buffer, buffer_pos);
+ buffer[buffer_pos++] = '\n';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+ }
+}
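Each function-info row mirrors trace_function_info_fields: the raw function id, two string-table ids, the script id, then line and column already shifted to 1-based (0 when the position is unknown, per SerializePosition above). Example row (values invented):

    // 42,7,9,135,12,3
    //  |  | |  |  |  \_ column 3 (0-based source column 2)
    //  |  | |  |  line 12 (0-based source line 11)
    //  |  | |  script_id
    //  |  | string id of script_name
    //  |  string id of name
    //  function_id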
+
+
void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
writer_->AddCharacter('\n');
writer_->AddCharacter('\"');
@@ -2693,37 +3015,21 @@ void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
void HeapSnapshotJSONSerializer::SerializeStrings() {
- List<HashMap::Entry*> sorted_strings;
- SortHashMap(&strings_, &sorted_strings);
+ ScopedVector<const unsigned char*> sorted_strings(
+ strings_.occupancy() + 1);
+ for (HashMap::Entry* entry = strings_.Start();
+ entry != NULL;
+ entry = strings_.Next(entry)) {
+ int index = static_cast<int>(reinterpret_cast<uintptr_t>(entry->value));
+ sorted_strings[index] = reinterpret_cast<const unsigned char*>(entry->key);
+ }
writer_->AddString("\"<dummy>\"");
- for (int i = 0; i < sorted_strings.length(); ++i) {
+ for (int i = 1; i < sorted_strings.length(); ++i) {
writer_->AddCharacter(',');
- SerializeString(
- reinterpret_cast<const unsigned char*>(sorted_strings[i]->key));
+ SerializeString(sorted_strings[i]);
if (writer_->aborted()) return;
}
}
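The explicit sort is gone because GetStringId hands out ids in dense insertion order starting at 1, so every entry can be placed directly at its id; slot 0 stays unused and is covered by the "<dummy>" entry. Tiny worked example:

    // GetStringId("foo") == 1, GetStringId("bar") == 2
    //   => sorted_strings = { <unused>, "foo", "bar" }  // index == string id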
-template<typename T>
-inline static int SortUsingEntryValue(const T* x, const T* y) {
- uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value);
- uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value);
- if (x_uint > y_uint) {
- return 1;
- } else if (x_uint == y_uint) {
- return 0;
- } else {
- return -1;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SortHashMap(
- HashMap* map, List<HashMap::Entry*>* sorted_entries) {
- for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
- sorted_entries->Add(p);
- sorted_entries->Sort(SortUsingEntryValue);
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index 7b0cf8f021..e4038b10f4 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -33,6 +33,8 @@
namespace v8 {
namespace internal {
+class AllocationTracker;
+class AllocationTraceNode;
class HeapEntry;
class HeapSnapshot;
@@ -227,8 +229,12 @@ class HeapObjectsMap {
void SnapshotGenerationFinished();
SnapshotObjectId FindEntry(Address addr);
- SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size);
- void MoveObject(Address from, Address to);
+ SnapshotObjectId FindOrAddEntry(Address addr,
+ unsigned int size,
+ bool accessed = true);
+ void MoveObject(Address from, Address to, int size);
+ void NewObject(Address addr, int size);
+ void UpdateObjectSize(Address addr, int size);
SnapshotObjectId last_assigned_id() const {
return next_id_ - kObjectIdStep;
}
@@ -247,6 +253,10 @@ class HeapObjectsMap {
static const SnapshotObjectId kGcRootsFirstSubrootId;
static const SnapshotObjectId kFirstAvailableObjectId;
+ int FindUntrackedObjects();
+
+ void UpdateHeapObjectsMap();
+
private:
struct EntryInfo {
EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
@@ -265,7 +275,6 @@ class HeapObjectsMap {
uint32_t count;
};
- void UpdateHeapObjectsMap();
void RemoveDeadEntries();
SnapshotObjectId next_id_;
@@ -289,8 +298,8 @@ class HeapSnapshotsCollection {
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
return ids_.PushHeapObjectsStats(stream);
}
- void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
- void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); }
+ void StartHeapObjectsTracking();
+ void StopHeapObjectsTracking();
HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
void SnapshotGenerationFinished(HeapSnapshot* snapshot);
@@ -298,6 +307,7 @@ class HeapSnapshotsCollection {
void RemoveSnapshot(HeapSnapshot* snapshot);
StringsStorage* names() { return &names_; }
+ AllocationTracker* allocation_tracker() { return allocation_tracker_; }
SnapshotObjectId FindObjectId(Address object_addr) {
return ids_.FindEntry(object_addr);
@@ -306,18 +316,29 @@ class HeapSnapshotsCollection {
return ids_.FindOrAddEntry(object_addr, object_size);
}
Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
- void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
+ void ObjectMoveEvent(Address from, Address to, int size) {
+ ids_.MoveObject(from, to, size);
+ }
+ void NewObjectEvent(Address addr, int size);
+ void UpdateObjectSizeEvent(Address addr, int size) {
+ ids_.UpdateObjectSize(addr, size);
+ }
SnapshotObjectId last_assigned_id() const {
return ids_.last_assigned_id();
}
size_t GetUsedMemorySize() const;
+ int FindUntrackedObjects() { return ids_.FindUntrackedObjects(); }
+
+ void UpdateHeapObjectsMap() { ids_.UpdateHeapObjectsMap(); }
+
private:
bool is_tracking_objects_; // Whether tracking object moves is needed.
List<HeapSnapshot*> snapshots_;
StringsStorage names_;
// Mapping from HeapObject addresses to objects' uids.
HeapObjectsMap ids_;
+ AllocationTracker* allocation_tracker_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
};
@@ -628,7 +649,7 @@ class HeapSnapshotJSONSerializer {
public:
explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
: snapshot_(snapshot),
- strings_(ObjectsMatch),
+ strings_(StringsMatch),
next_node_id_(1),
next_string_id_(1),
writer_(NULL) {
@@ -636,14 +657,16 @@ class HeapSnapshotJSONSerializer {
void Serialize(v8::OutputStream* stream);
private:
- INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
- return key1 == key2;
+ INLINE(static bool StringsMatch(void* key1, void* key2)) {
+ return strcmp(reinterpret_cast<char*>(key1),
+ reinterpret_cast<char*>(key2)) == 0;
}
- INLINE(static uint32_t ObjectHash(const void* key)) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)),
- v8::internal::kZeroHashSeed);
+ INLINE(static uint32_t StringHash(const void* string)) {
+ const char* s = reinterpret_cast<const char*>(string);
+ int len = static_cast<int>(strlen(s));
+ return StringHasher::HashSequentialString(
+ s, len, v8::internal::kZeroHashSeed);
}
int GetStringId(const char* s);
@@ -654,9 +677,11 @@ class HeapSnapshotJSONSerializer {
void SerializeNode(HeapEntry* entry);
void SerializeNodes();
void SerializeSnapshot();
+ void SerializeTraceTree();
+ void SerializeTraceNode(AllocationTraceNode* node);
+ void SerializeTraceNodeInfos();
void SerializeString(const unsigned char* s);
void SerializeStrings();
- void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
static const int kEdgeFieldsCount;
static const int kNodeFieldsCount;
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 24e4039422..fa358c5392 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -67,29 +67,14 @@ namespace internal {
Heap::Heap()
: isolate_(NULL),
+ code_range_size_(kIs64BitArch ? 512 * MB : 0),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
-#if V8_TARGET_ARCH_X64
-#define LUMP_OF_MEMORY (2 * MB)
- code_range_size_(512*MB),
-#else
-#define LUMP_OF_MEMORY MB
- code_range_size_(0),
-#endif
-#if defined(ANDROID) || V8_TARGET_ARCH_MIPS
- reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- initial_semispace_size_(Page::kPageSize),
- max_old_generation_size_(192*MB),
- max_executable_size_(max_old_generation_size_),
-#else
- reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
+ max_semispace_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(Page::kPageSize),
- max_old_generation_size_(700ul * LUMP_OF_MEMORY),
- max_executable_size_(256l * LUMP_OF_MEMORY),
-#endif
-
+ max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+ max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
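The per-architecture #ifdef block above collapses into one expression scaled by kPointerSize / 4, so pointer width alone now picks the limits. Worked values:

    // 32-bit (kPointerSize == 4): semispaces   8 * 1 * MB =    8 MB,
    //                             old gen    700 * 1 * MB =  700 MB,
    //                             executable 256 MB, code range disabled.
    // 64-bit (kPointerSize == 8): semispaces   8 * 2 * MB =   16 MB,
    //                             old gen    700 * 2 * MB = 1400 MB,
    //                             executable 512 MB, code range 512 MB.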
@@ -101,6 +86,7 @@ Heap::Heap()
contexts_disposed_(0),
global_ic_age_(0),
flush_monomorphic_ics_(false),
+ allocation_mementos_found_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@@ -129,8 +115,6 @@ Heap::Heap()
old_gen_exhausted_(false),
store_buffer_rebuilder_(store_buffer()),
hidden_string_(NULL),
- global_gc_prologue_callback_(NULL),
- global_gc_epilogue_callback_(NULL),
gc_safe_size_of_old_object_(NULL),
total_regexp_code_generated_(0),
tracer_(NULL),
@@ -157,9 +141,11 @@ Heap::Heap()
mark_sweeps_since_idle_round_started_(0),
gc_count_at_last_idle_gc_(0),
scavenges_since_last_idle_round_(kIdleScavengeThreshold),
+ full_codegen_bytes_generated_(0),
+ crankshaft_codegen_bytes_generated_(0),
gcs_since_last_deopt_(0),
#ifdef VERIFY_HEAP
- no_weak_embedded_maps_verification_scope_depth_(0),
+ no_weak_object_verification_scope_depth_(0),
#endif
promotion_queue_(this),
configured_(false),
@@ -172,6 +158,9 @@ Heap::Heap()
max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
+ // Ensure old_generation_size_ is a multiple of kPageSize.
+ ASSERT(MB >= Page::kPageSize);
+
intptr_t max_virtual = OS::MaxVirtualMemory();
if (max_virtual > 0) {
@@ -461,6 +450,10 @@ void Heap::GarbageCollectionPrologue() {
#endif // DEBUG
store_buffer()->GCPrologue();
+
+ if (FLAG_concurrent_osr) {
+ isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
+ }
}
@@ -521,10 +514,31 @@ void Heap::GarbageCollectionEpilogue() {
isolate_->counters()->number_of_symbols()->Set(
string_table()->NumberOfElements());
+ if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
+ isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
+ static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
+ (crankshaft_codegen_bytes_generated_
+ + full_codegen_bytes_generated_)));
+ }
+
if (CommittedMemory() > 0) {
isolate_->counters()->external_fragmentation_total()->AddSample(
static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_new_space()->
+ AddSample(static_cast<int>(
+ (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
+ static_cast<int>(
+ (old_pointer_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_old_data_space()->AddSample(
+ static_cast<int>(
+ (old_data_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_code_space()->
+ AddSample(static_cast<int>(
+ (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_map_space()->AddSample(
static_cast<int>(
(map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
@@ -535,6 +549,9 @@ void Heap::GarbageCollectionEpilogue() {
AddSample(static_cast<int>(
(property_cell_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
+ isolate_->counters()->heap_fraction_lo_space()->
+ AddSample(static_cast<int>(
+ (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_sample_total_committed()->AddSample(
static_cast<int>(CommittedMemory() / KB));
@@ -548,6 +565,8 @@ void Heap::GarbageCollectionEpilogue() {
heap_sample_property_cell_space_committed()->
AddSample(static_cast<int>(
property_cell_space()->CommittedMemory() / KB));
+ isolate_->counters()->heap_sample_code_space_committed()->AddSample(
+ static_cast<int>(code_space()->CommittedMemory() / KB));
}
#define UPDATE_COUNTERS_FOR_SPACE(space) \
@@ -610,6 +629,11 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
+ if (FLAG_concurrent_recompilation) {
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ DisallowHeapAllocation no_recursive_gc;
+ isolate()->optimizing_compiler_thread()->Flush();
+ }
mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
@@ -1055,12 +1079,17 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
- global_gc_prologue_callback_();
- }
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_type & gc_prologue_callbacks_[i].gc_type) {
- gc_prologue_callbacks_[i].callback(gc_type, flags);
+ if (!gc_prologue_callbacks_[i].pass_isolate_) {
+ v8::GCPrologueCallback callback =
+ reinterpret_cast<v8::GCPrologueCallback>(
+ gc_prologue_callbacks_[i].callback);
+ callback(gc_type, flags);
+ } else {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+ gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
+ }
}
}
}
@@ -1069,12 +1098,18 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
- gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
+ if (!gc_epilogue_callbacks_[i].pass_isolate_) {
+ v8::GCPrologueCallback callback =
+ reinterpret_cast<v8::GCPrologueCallback>(
+ gc_epilogue_callbacks_[i].callback);
+ callback(gc_type, kNoGCCallbackFlags);
+ } else {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+ gc_epilogue_callbacks_[i].callback(
+ isolate, gc_type, kNoGCCallbackFlags);
+ }
}
}
- if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
- global_gc_epilogue_callback_();
- }
}
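Callbacks registered through the isolate-aware path are stored with pass_isolate_ set and invoked with the isolate as their first argument; legacy registrations are cast back to the old two-argument shape, so both kinds coexist in one list. A hedged embedder-side sketch, assuming the matching v8::Isolate::AddGCPrologueCallback entry point added elsewhere in this upgrade:

    void OnGCStart(v8::Isolate* isolate, v8::GCType type,
                   v8::GCCallbackFlags flags) {
      // Runs before each matching collection; the isolate now arrives as an
      // argument instead of being fetched from thread-local state.
    }
    // Registration (sketch):
    //   isolate->AddGCPrologueCallback(OnGCStart, v8::kGCTypeMarkSweepCompact);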
@@ -1326,6 +1361,8 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
RelocationLock relocation_lock(this);
+ allocation_mementos_found_ = 0;
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
@@ -1473,6 +1510,11 @@ void Heap::Scavenge() {
gc_state_ = NOT_IN_GC;
scavenges_since_last_idle_round_++;
+
+ if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0) {
+ PrintF("AllocationMementos found during scavenge = %d\n",
+ allocation_mementos_found_);
+ }
}
@@ -1947,6 +1989,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
+STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);
INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
@@ -2091,8 +2134,12 @@ class ScavengingVisitor : public StaticVisitorBase {
if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
// Update NewSpace stats if necessary.
RecordCopiedObject(heap, target);
- HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
Isolate* isolate = heap->isolate();
+ HeapProfiler* heap_profiler = isolate->heap_profiler();
+ if (heap_profiler->is_profiling()) {
+ heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+ size);
+ }
if (isolate->logger()->is_logging_code_events() ||
isolate->cpu_profiler()->is_profiling()) {
if (target->IsSharedFunctionInfo()) {
@@ -2129,12 +2176,10 @@ class ScavengingVisitor : public StaticVisitorBase {
MaybeObject* maybe_result;
if (object_contents == DATA_OBJECT) {
- // TODO(mstarzinger): Turn this check into a regular assert soon!
- CHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
} else {
- // TODO(mstarzinger): Turn this check into a regular assert soon!
- CHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
}
@@ -2165,8 +2210,7 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
}
- // TODO(mstarzinger): Turn this check into a regular assert soon!
- CHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
+ ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
@@ -2392,7 +2436,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result;
- MaybeObject* maybe_result = AllocateRawMap();
+ MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
// Map::cast cannot be used due to uninitialized map field.
@@ -2417,7 +2461,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
Object* result;
- MaybeObject* maybe_result = AllocateRawMap();
+ MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!maybe_result->To(&result)) return maybe_result;
Map* map = reinterpret_cast<Map*>(result);
@@ -2650,6 +2694,12 @@ bool Heap::CreateInitialMaps() {
set_fixed_double_array_map(Map::cast(obj));
{ MaybeObject* maybe_obj =
+ AllocateMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_constant_pool_array_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj =
AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2887,12 +2937,12 @@ bool Heap::CreateInitialMaps() {
MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
+ int size = HeapNumber::kSize;
STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -2902,26 +2952,12 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
}
-MaybeObject* Heap::AllocateHeapNumber(double value) {
- // Use general version, if we're forced to always allocate.
- if (always_allocate()) return AllocateHeapNumber(value, TENURED);
-
- // This version of AllocateHeapNumber is optimized for
- // allocation in new space.
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
- Object* result;
- { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
- HeapNumber::cast(result)->set_value(value);
- return result;
-}
-
-
MaybeObject* Heap::AllocateCell(Object* value) {
+ int size = Cell::kSize;
+ STATIC_ASSERT(Cell::kSize <= Page::kNonCodeObjectAreaSize);
+
Object* result;
- { MaybeObject* maybe_result = AllocateRawCell();
+ { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
@@ -2930,9 +2966,13 @@ MaybeObject* Heap::AllocateCell(Object* value) {
}
-MaybeObject* Heap::AllocatePropertyCell(Object* value) {
+MaybeObject* Heap::AllocatePropertyCell() {
+ int size = PropertyCell::kSize;
+ STATIC_ASSERT(PropertyCell::kSize <= Page::kNonCodeObjectAreaSize);
+
Object* result;
- MaybeObject* maybe_result = AllocateRawPropertyCell();
+ MaybeObject* maybe_result =
+ AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
HeapObject::cast(result)->set_map_no_write_barrier(
@@ -2940,10 +2980,8 @@ MaybeObject* Heap::AllocatePropertyCell(Object* value) {
PropertyCell* cell = PropertyCell::cast(result);
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
- cell->set_value(value);
+ cell->set_value(the_hole_value());
cell->set_type(Type::None());
- maybe_result = cell->SetValueInferType(value);
- if (maybe_result->IsFailure()) return maybe_result;
return result;
}
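AllocatePropertyCell no longer takes the value: the fresh cell holds the hole with type None, and the caller installs the real value afterwards (the SetValueInferType step deleted here presumably moves to the factory layer). Hedged caller-side sketch:

    PropertyCell* cell;
    MaybeObject* maybe_cell = heap->AllocatePropertyCell();
    if (!maybe_cell->To(&cell)) return maybe_cell;
    // Caller is now responsible for replacing the hole placeholder, e.g.:
    //   cell->SetValueInferType(value);   // assumed factory-level step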
@@ -2958,17 +2996,16 @@ MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateAllocationSite() {
- Object* result;
+ AllocationSite* site;
MaybeObject* maybe_result = Allocate(allocation_site_map(),
OLD_POINTER_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- AllocationSite* site = AllocationSite::cast(result);
+ if (!maybe_result->To(&site)) return maybe_result;
site->Initialize();
// Link the site
site->set_weak_next(allocation_sites_list());
set_allocation_sites_list(site);
- return result;
+ return site;
}
@@ -4057,31 +4094,8 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > ByteArray::kMaxLength) {
return Failure::OutOfMemoryException(0x7);
}
- if (pretenure == NOT_TENURED) {
- return AllocateByteArray(length);
- }
int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_DATA_SPACE;
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
- byte_array_map());
- reinterpret_cast<ByteArray*>(result)->set_length(length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateByteArray(int length) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException(0x8);
- }
- int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
{ MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4112,11 +4126,10 @@ MaybeObject* Heap::AllocateExternalArray(int length,
ExternalArrayType array_type,
void* external_pointer,
PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ int size = ExternalArray::kAlignedSize;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
- space,
- OLD_DATA_SPACE);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -4134,7 +4147,8 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_reference,
bool immovable,
- bool crankshafted) {
+ bool crankshafted,
+ int prologue_offset) {
// Allocate ByteArray before the Code object, so that we do not risk
// leaving uninitialized Code object (and breaking the heap).
ByteArray* reloc_info;
@@ -4184,10 +4198,18 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
- code->set_prologue_offset(kPrologueOffsetNotSet);
+ code->set_prologue_offset(prologue_offset);
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
}
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (code->kind() == Code::FUNCTION) {
+ code->set_has_debug_break_slots(
+ isolate_->debugger()->IsDebuggerActive());
+ }
+#endif
+
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -4310,6 +4332,7 @@ MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(allocation_site->map() == allocation_site_map());
alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
return result;
}
@@ -4414,10 +4437,6 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
arguments_object_size = kArgumentsObjectSize;
}
- // This calls Copy directly rather than using Heap::AllocateRaw so we
- // duplicate the check here.
- ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
-
// Check that the size of the boilerplate matches our
// expectations. The ArgumentsAccessStub::GenerateNewObject relies
// on the size being a known constant.
@@ -4553,9 +4572,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap(
}
// Allocate the JSObject.
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+ int size = map->instance_size();
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
Object* obj;
MaybeObject* maybe_obj = Allocate(map, space);
if (!maybe_obj->To(&obj)) return maybe_obj;
@@ -4588,8 +4606,8 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
}
// Allocate the JSObject.
- AllocationSpace space = NEW_SPACE;
- if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+ int size = map->instance_size();
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
Object* obj;
MaybeObject* maybe_obj =
AllocateWithAllocationSite(map, space, allocation_site);
@@ -4745,20 +4763,6 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
}
-MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
- ElementsKind elements_kind,
- int length,
- int capacity,
- Handle<AllocationSite> allocation_site,
- ArrayStorageAllocationMode mode) {
- MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
- allocation_site);
- JSArray* array;
- if (!maybe_array->To(&array)) return maybe_array;
- return AllocateJSArrayStorage(array, length, capacity, mode);
-}
-
-
MaybeObject* Heap::AllocateJSArrayStorage(
JSArray* array,
int length,
@@ -4861,74 +4865,7 @@ MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
}
-MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
- ASSERT(constructor->has_initial_map());
- Map* map = constructor->initial_map();
- ASSERT(map->is_dictionary_map());
-
- // Make sure no field properties are described in the initial map.
- // This guarantees us that normalizing the properties does not
- // require us to change property values to PropertyCells.
- ASSERT(map->NextFreePropertyIndex() == 0);
-
- // Make sure we don't have a ton of pre-allocated slots in the
- // global objects. They will be unused once we normalize the object.
- ASSERT(map->unused_property_fields() == 0);
- ASSERT(map->inobject_properties() == 0);
-
- // Initial size of the backing store to avoid resize of the storage during
- // bootstrapping. The size differs between the JS global object and the
- // builtins object.
- int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
-
- // Allocate a dictionary object for backing storage.
- NameDictionary* dictionary;
- MaybeObject* maybe_dictionary =
- NameDictionary::Allocate(
- this,
- map->NumberOfOwnDescriptors() * 2 + initial_size);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
-
- // The global object might be created from an object template with accessors.
- // Fill these accessors into the dictionary.
- DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
- PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
- Object* value = descs->GetCallbacksObject(i);
- MaybeObject* maybe_value = AllocatePropertyCell(value);
- if (!maybe_value->ToObject(&value)) return maybe_value;
-
- MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_added->To(&dictionary)) return maybe_added;
- }
-
- // Allocate the global object and initialize it with the backing store.
- JSObject* global;
- MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
- if (!maybe_global->To(&global)) return maybe_global;
-
- InitializeJSObjectFromMap(global, dictionary, map);
-
- // Create a new map for the global object.
- Map* new_map;
- MaybeObject* maybe_map = map->CopyDropDescriptors();
- if (!maybe_map->To(&new_map)) return maybe_map;
- new_map->set_dictionary_map(true);
-
- // Set up the global object as a normalized object.
- global->set_map(new_map);
- global->set_properties(dictionary);
-
- // Make sure result is a global object with properties in dictionary.
- ASSERT(global->IsGlobalObject());
- ASSERT(!global->HasFastProperties());
- return global;
-}
-
-
-MaybeObject* Heap::CopyJSObject(JSObject* source) {
+MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// Never used to copy functions. If functions need to be copied we
// have to be careful to clear the literals array.
SLOW_ASSERT(!source->IsJSFunction());
@@ -4938,6 +4875,9 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
int object_size = map->instance_size();
Object* clone;
+ ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) &&
+ map->instance_type() == JS_ARRAY_TYPE));
+
WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
// If we're forced to always allocate, we use the general allocation
@@ -4958,7 +4898,10 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
} else {
wb_mode = SKIP_WRITE_BARRIER;
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
+ { int adjusted_object_size = site != NULL
+ ? object_size + AllocationMemento::kSize
+ : object_size;
+ MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
SLOW_ASSERT(InNewSpace(clone));
@@ -4967,115 +4910,21 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
CopyBlock(HeapObject::cast(clone)->address(),
source->address(),
object_size);
- }
-
- SLOW_ASSERT(
- JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
- FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
- FixedArray* properties = FixedArray::cast(source->properties());
- // Update elements if necessary.
- if (elements->length() > 0) {
- Object* elem;
- { MaybeObject* maybe_elem;
- if (elements->map() == fixed_cow_array_map()) {
- maybe_elem = FixedArray::cast(elements);
- } else if (source->HasFastDoubleElements()) {
- maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
- } else {
- maybe_elem = CopyFixedArray(FixedArray::cast(elements));
- }
- if (!maybe_elem->ToObject(&elem)) return maybe_elem;
- }
- JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
- }
- // Update properties if necessary.
- if (properties->length() > 0) {
- Object* prop;
- { MaybeObject* maybe_prop = CopyFixedArray(properties);
- if (!maybe_prop->ToObject(&prop)) return maybe_prop;
- }
- JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
- }
- // Return the new clone.
- return clone;
-}
-
-
-MaybeObject* Heap::CopyJSObjectWithAllocationSite(
- JSObject* source,
- AllocationSite* site) {
- // Never used to copy functions. If functions need to be copied we
- // have to be careful to clear the literals array.
- SLOW_ASSERT(!source->IsJSFunction());
-
- // Make the clone.
- Map* map = source->map();
- int object_size = map->instance_size();
- Object* clone;
-
- ASSERT(AllocationSite::CanTrack(map->instance_type()));
- ASSERT(map->instance_type() == JS_ARRAY_TYPE);
- WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
-
- // If we're forced to always allocate, we use the general allocation
- // functions which may leave us with an object in old space.
- int adjusted_object_size = object_size;
- if (always_allocate()) {
- // We'll only track origin if we are certain to allocate in new space
- const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
- if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
- adjusted_object_size += AllocationMemento::kSize;
- }
-
- { MaybeObject* maybe_clone =
- AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- Address clone_address = HeapObject::cast(clone)->address();
- CopyBlock(clone_address,
- source->address(),
- object_size);
- // Update write barrier for all fields that lie beyond the header.
- int write_barrier_offset = adjusted_object_size > object_size
- ? JSArray::kSize + AllocationMemento::kSize
- : JSObject::kHeaderSize;
- if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
- RecordWrites(clone_address,
- write_barrier_offset,
- (object_size - write_barrier_offset) / kPointerSize);
- }
- // Track allocation site information, if we failed to allocate it inline.
- if (InNewSpace(clone) &&
- adjusted_object_size == object_size) {
- MaybeObject* maybe_alloc_memento =
- AllocateStruct(ALLOCATION_MEMENTO_TYPE);
- AllocationMemento* alloc_memento;
- if (maybe_alloc_memento->To(&alloc_memento)) {
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+ if (site != NULL) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(clone) + object_size);
+ alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(site->map() == allocation_site_map());
+ alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+ HeapProfiler* profiler = isolate()->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
+ object_size);
+ profiler->NewObjectEvent(alloc_memento->address(),
+ AllocationMemento::kSize);
}
}
- } else {
- wb_mode = SKIP_WRITE_BARRIER;
- adjusted_object_size += AllocationMemento::kSize;
-
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- SLOW_ASSERT(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(HeapObject::cast(clone)->address(),
- source->address(),
- object_size);
- }
-
- if (adjusted_object_size > object_size) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(clone) + object_size);
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
}
SLOW_ASSERT(
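The separate CopyJSObjectWithAllocationSite body below is folded into this one path: when a site is supplied, the new-space clone is simply allocated AllocationMemento::kSize bytes larger and the memento is written directly behind the object. Resulting layout:

    // [ JSObject clone: object_size bytes ][ AllocationMemento::kSize bytes ]
    //   ^ clone address                      ^ clone address + object_size
    // Both memento stores skip the write barrier, which is safe here only
    // because the clone is known to sit in new space.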
@@ -5366,12 +5215,11 @@ MaybeObject* Heap::AllocateInternalizedStringImpl(
map = internalized_string_map();
size = SeqTwoByteString::SizeFor(chars);
}
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5410,16 +5258,10 @@ MaybeObject* Heap::AllocateRawOneByteString(int length,
}
int size = SeqOneByteString::SizeFor(length);
ASSERT(size <= SeqOneByteString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5440,16 +5282,10 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
}
int size = SeqTwoByteString::SizeFor(length);
ASSERT(size <= SeqTwoByteString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5474,24 +5310,6 @@ MaybeObject* Heap::AllocateJSArray(
}
-MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site) {
- Context* native_context = isolate()->context()->native_context();
- JSFunction* array_function = native_context->array_function();
- Map* map = array_function->initial_map();
- Object* maybe_map_array = native_context->js_array_maps();
- if (!maybe_map_array->IsUndefined()) {
- Object* maybe_transitioned_map =
- FixedArray::cast(maybe_map_array)->get(elements_kind);
- if (!maybe_transitioned_map->IsUndefined()) {
- map = Map::cast(maybe_transitioned_map);
- }
- }
- return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
-}
-
-
MaybeObject* Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
Object* result;
@@ -5512,25 +5330,10 @@ MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
}
-MaybeObject* Heap::AllocateRawFixedArray(int length) {
- if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xd);
- }
- ASSERT(length > 0);
- // Use the general function if we're forced to always allocate.
- if (always_allocate()) return AllocateFixedArray(length, TENURED);
- // Allocate the raw data for a fixed array.
- int size = FixedArray::SizeFor(length);
- return size <= Page::kMaxNonCodeHeapObjectSize
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
-}
-
-
MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
int len = src->length();
Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
+ { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
if (InNewSpace(obj)) {
@@ -5570,21 +5373,24 @@ MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
}
-MaybeObject* Heap::AllocateFixedArray(int length) {
- ASSERT(length >= 0);
- if (length == 0) return empty_fixed_array();
- Object* result;
- { MaybeObject* maybe_result = AllocateRawFixedArray(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
+ Map* map) {
+ int int64_entries = src->count_of_int64_entries();
+ int ptr_entries = src->count_of_ptr_entries();
+ int int32_entries = src->count_of_int32_entries();
+ Object* obj;
+ { MaybeObject* maybe_obj =
+ AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- // Initialize header.
- FixedArray* array = reinterpret_cast<FixedArray*>(result);
- array->set_map_no_write_barrier(fixed_array_map());
- array->set_length(length);
- // Initialize body.
- ASSERT(!InNewSpace(undefined_value()));
- MemsetPointer(array->data_start(), undefined_value(), length);
- return result;
+ HeapObject* dst = HeapObject::cast(obj);
+ dst->set_map_no_write_barrier(map);
+ CopyBlock(
+ dst->address() + ConstantPoolArray::kLengthOffset,
+ src->address() + ConstantPoolArray::kLengthOffset,
+ ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
+ - ConstantPoolArray::kLengthOffset);
+ return obj;
}
@@ -5593,35 +5399,26 @@ MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
return Failure::OutOfMemoryException(0xe);
}
int size = FixedArray::SizeFor(length);
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_POINTER_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
-
- return AllocateRaw(size, space, retry_space);
+ return AllocateRaw(size, space, OLD_POINTER_SPACE);
}
-MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
- Heap* heap,
- int length,
- PretenureFlag pretenure,
- Object* filler) {
+MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
+ PretenureFlag pretenure,
+ Object* filler) {
ASSERT(length >= 0);
- ASSERT(heap->empty_fixed_array()->IsFixedArray());
- if (length == 0) return heap->empty_fixed_array();
+ ASSERT(empty_fixed_array()->IsFixedArray());
+ if (length == 0) return empty_fixed_array();
- ASSERT(!heap->InNewSpace(filler));
+ ASSERT(!InNewSpace(filler));
Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
+ { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -5630,19 +5427,13 @@ MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- undefined_value());
+ return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
}
MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- the_hole_value());
+ return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
}
@@ -5650,7 +5441,7 @@ MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
if (length == 0) return empty_fixed_array();
Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
+ { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -5720,24 +5511,52 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
return Failure::OutOfMemoryException(0xf);
}
int size = FixedDoubleArray::SizeFor(length);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
#ifndef V8_HOST_ARCH_64_BIT
size += kPointerSize;
#endif
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
+ HeapObject* object;
+ { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
}
+ return EnsureDoubleAligned(this, object, size);
+}
+
+
+MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
+ number_of_int32_entries > 0);
+ int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries);
+#ifndef V8_HOST_ARCH_64_BIT
+ size += kPointerSize;
+#endif
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
+
HeapObject* object;
- { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
}
+ object = EnsureDoubleAligned(this, object, size);
+ HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
- return EnsureDoubleAligned(this, object, size);
+ ConstantPoolArray* constant_pool =
+ reinterpret_cast<ConstantPoolArray*>(object);
+ constant_pool->SetEntryCounts(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries);
+ MemsetPointer(
+ HeapObject::RawField(
+ constant_pool,
+ constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
+ undefined_value(),
+ number_of_ptr_entries);
+ return constant_pool;
}
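AllocateConstantPoolArray sizes the object for all three entry kinds, double-aligns it (the int64 section leads, hence the STATIC_ASSERT on the header size earlier in this diff), writes the counts, and seeds only the tagged section with undefined so the GC never scans garbage; the raw int64 and int32 sections need no initialization. Layout sketch:

    // [ map | entry counts | int64 entries | ptr entries | int32 entries ]
    //                         ^ 8-byte aligned  ^ preset to undefined_value()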
@@ -5937,8 +5756,7 @@ STRUCT_LIST(MAKE_CASE)
return Failure::InternalError();
}
int size = map->instance_size();
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
Object* result;
{ MaybeObject* maybe_result = Allocate(map, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -6965,6 +6783,7 @@ bool Heap::CreateHeapObjects() {
native_contexts_list_ = undefined_value();
array_buffers_list_ = undefined_value();
allocation_sites_list_ = undefined_value();
+ weak_object_to_code_table_ = undefined_value();
return true;
}
@@ -7068,15 +6887,17 @@ void Heap::TearDown() {
}
-void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate) {
ASSERT(callback != NULL);
- GCPrologueCallbackPair pair(callback, gc_type);
+ GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
ASSERT(!gc_prologue_callbacks_.Contains(pair));
return gc_prologue_callbacks_.Add(pair);
}
-void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
ASSERT(callback != NULL);
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_prologue_callbacks_[i].callback == callback) {
@@ -7088,15 +6909,17 @@ void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
}
-void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+ GCType gc_type,
+ bool pass_isolate) {
ASSERT(callback != NULL);
- GCEpilogueCallbackPair pair(callback, gc_type);
+ GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
ASSERT(!gc_epilogue_callbacks_.Contains(pair));
return gc_epilogue_callbacks_.Add(pair);
}
-void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
ASSERT(callback != NULL);
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_epilogue_callbacks_[i].callback == callback) {
@@ -7108,6 +6931,37 @@ void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
}
+MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
+ DependentCode* dep) {
+ ASSERT(!InNewSpace(obj));
+ ASSERT(!InNewSpace(dep));
+ MaybeObject* maybe_obj =
+ WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
+ WeakHashTable* table;
+ if (!maybe_obj->To(&table)) return maybe_obj;
+ if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
+ WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
+ }
+ set_weak_object_to_code_table(table);
+ ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
+ return weak_object_to_code_table_;
+}
+
+
+DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
+ Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
+ if (dep->IsDependentCode()) return DependentCode::cast(dep);
+ return DependentCode::cast(empty_fixed_array());
+}
+
+
+void Heap::EnsureWeakObjectToCodeTable() {
+ if (!weak_object_to_code_table()->IsHashTable()) {
+ set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
+ }
+}
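Together these helpers implement a lazily created weak side table from arbitrary objects to the optimized code that depends on them. Hedged usage sketch for a caller about to record a dependency:

    heap->EnsureWeakObjectToCodeTable();        // create table on first use
    MaybeObject* maybe =
        heap->AddWeakObjectToCodeDependency(object, dependent_code);
    if (maybe->IsFailure()) return maybe;       // hash table growth can fail
    // Lookup returns an empty DependentCode when nothing was recorded:
    DependentCode* codes = heap->LookupWeakObjectToCodeDependency(object);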
+
+
#ifdef DEBUG
class PrintHandleVisitor: public ObjectVisitor {
@@ -8090,6 +7944,18 @@ void Heap::CheckpointObjectStats() {
static_cast<int>(object_sizes_last_time_[index]));
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge; \
+ counters->count_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_CODE_AGE_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_CODE_AGE_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ CODE_AGE_LIST_WITH_NO_AGE(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 4dfa076ebd..96cda586b7 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -71,6 +71,7 @@ namespace internal {
V(Map, scope_info_map, ScopeInfoMap) \
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(Map, constant_pool_array_map, ConstantPoolArrayMap) \
V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Map, hash_table_map, HashTableMap) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
@@ -292,7 +293,10 @@ namespace internal {
V(throw_string, "throw") \
V(done_string, "done") \
V(value_string, "value") \
- V(next_string, "next")
+ V(next_string, "next") \
+ V(byte_length_string, "byteLength") \
+ V(byte_offset_string, "byteOffset") \
+ V(buffer_string, "buffer")
// Forward declarations.
class GCTracer;
@@ -635,10 +639,6 @@ class Heap {
pretenure);
}
- inline MUST_USE_RESULT MaybeObject* AllocateEmptyJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site);
-
// Allocate a JSArray with a specified length but elements that are left
// uninitialized.
MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
@@ -648,13 +648,6 @@ class Heap {
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorageWithAllocationSite(
- ElementsKind elements_kind,
- int length,
- int capacity,
- Handle<AllocationSite> allocation_site,
- ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
-
MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage(
JSArray* array,
int length,
@@ -668,19 +661,12 @@ class Heap {
int length,
PretenureFlag pretenure = NOT_TENURED);
- // Allocates and initializes a new global object based on a constructor.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateGlobalObject(JSFunction* constructor);
-
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Returns failure if allocation failed.
- MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
-
- MUST_USE_RESULT MaybeObject* CopyJSObjectWithAllocationSite(
- JSObject* source, AllocationSite* site);
+ // Optionally takes an AllocationSite to be appended in an AllocationMemento.
+ MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source,
+ AllocationSite* site = NULL);
// Allocates the function prototype.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -876,14 +862,9 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length,
- PretenureFlag pretenure);
-
- // Allocate a non-tenured byte array of the specified length
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateByteArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates an external array of the specified length and type.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -901,22 +882,6 @@ class Heap {
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateSymbol();
- // Allocate a tenured simple cell.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateCell(Object* value);
-
- // Allocate a tenured JS global property cell.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocatePropertyCell(Object* value);
-
- // Allocate Box.
- MUST_USE_RESULT MaybeObject* AllocateBox(Object* value,
- PretenureFlag pretenure);
-
  // Allocate a tenured AllocationSite. Its payload is null.
MUST_USE_RESULT MaybeObject* AllocateAllocationSite();
@@ -924,10 +889,9 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length,
- PretenureFlag pretenure);
- // Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateFixedArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates an uninitialized fixed array. It must be filled by the caller.
//
@@ -958,6 +922,16 @@ class Heap {
MUST_USE_RESULT MaybeObject* CopyFixedDoubleArrayWithMap(
FixedDoubleArray* src, Map* map);
+ // Make a copy of src and return it. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ MUST_USE_RESULT inline MaybeObject* CopyConstantPoolArray(
+ ConstantPoolArray* src);
+
+ // Make a copy of src, set the map, and return the copy. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ MUST_USE_RESULT MaybeObject* CopyConstantPoolArrayWithMap(
+ ConstantPoolArray* src, Map* map);
+
// Allocates a fixed array initialized with the hole values.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -966,9 +940,10 @@ class Heap {
int length,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
- int length,
- PretenureFlag pretenure);
+ MUST_USE_RESULT MaybeObject* AllocateConstantPoolArray(
+ int first_int64_index,
+ int first_ptr_index,
+ int first_int32_index);
// Allocates a fixed double array with uninitialized values. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
@@ -1056,10 +1031,7 @@ class Heap {
  // Allocates a HeapNumber from value.
MUST_USE_RESULT MaybeObject* AllocateHeapNumber(
- double value,
- PretenureFlag pretenure);
- // pretenure = NOT_TENURED
- MUST_USE_RESULT MaybeObject* AllocateHeapNumber(double value);
+ double value, PretenureFlag pretenure = NOT_TENURED);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -1153,11 +1125,13 @@ class Heap {
// self_reference. This allows generated code to reference its own Code
// object by containing this pointer.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable = false,
- bool crankshafted = false);
+ MUST_USE_RESULT MaybeObject* CreateCode(
+ const CodeDesc& desc,
+ Code::Flags flags,
+ Handle<Object> self_reference,
+ bool immovable = false,
+ bool crankshafted = false,
+ int prologue_offset = Code::kPrologueOffsetNotSet);
MUST_USE_RESULT MaybeObject* CopyCode(Code* code);
@@ -1272,22 +1246,15 @@ class Heap {
void GarbageCollectionGreedyCheck();
#endif
- void AddGCPrologueCallback(
- GCPrologueCallback callback, GCType gc_type_filter);
- void RemoveGCPrologueCallback(GCPrologueCallback callback);
-
- void AddGCEpilogueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter);
- void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+ void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type_filter,
+ bool pass_isolate = true);
+ void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);
- void SetGlobalGCPrologueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
- global_gc_prologue_callback_ = callback;
- }
- void SetGlobalGCEpilogueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
- global_gc_epilogue_callback_ = callback;
- }
+ void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+ GCType gc_type_filter,
+ bool pass_isolate = true);
+ void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
@@ -1337,6 +1304,8 @@ class Heap {
Object* allocation_sites_list() { return allocation_sites_list_; }
Object** allocation_sites_list_address() { return &allocation_sites_list_; }
+ Object* weak_object_to_code_table() { return weak_object_to_code_table_; }
+
// Number of mark-sweeps.
unsigned int ms_count() { return ms_count_; }
@@ -1428,8 +1397,8 @@ class Heap {
void Verify();
- bool weak_embedded_maps_verification_enabled() {
- return no_weak_embedded_maps_verification_scope_depth_ == 0;
+ bool weak_embedded_objects_verification_enabled() {
+ return no_weak_object_verification_scope_depth_ == 0;
}
#endif
@@ -1530,11 +1499,6 @@ class Heap {
inline intptr_t AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes);
- // Allocate uninitialized fixed array.
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
- PretenureFlag pretenure);
-
// This is only needed for testing high promotion mode.
void SetNewSpaceHighPromotionModeActive(bool mode) {
new_space_high_promotion_mode_active_ = mode;
@@ -1692,6 +1656,14 @@ class Heap {
total_regexp_code_generated_ += size;
}
+ void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
+ if (is_crankshafted) {
+ crankshaft_codegen_bytes_generated_ += size;
+ } else {
+ full_codegen_bytes_generated_ += size;
+ }
+ }
+
// Returns maximum GC pause.
double get_max_gc_pause() { return max_gc_pause_; }
@@ -1838,26 +1810,30 @@ class Heap {
FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
FIRST_FIXED_ARRAY_SUB_TYPE =
FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
- OBJECT_STATS_COUNT =
- FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1
+ FIRST_CODE_AGE_SUB_TYPE =
+ FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+ OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kLastCodeAge + 1
};
- void RecordObjectStats(InstanceType type, int sub_type, size_t size) {
+ void RecordObjectStats(InstanceType type, size_t size) {
ASSERT(type <= LAST_TYPE);
- if (sub_type < 0) {
- object_counts_[type]++;
- object_sizes_[type] += size;
- } else {
- if (type == CODE_TYPE) {
- ASSERT(sub_type < Code::NUMBER_OF_KINDS);
- object_counts_[FIRST_CODE_KIND_SUB_TYPE + sub_type]++;
- object_sizes_[FIRST_CODE_KIND_SUB_TYPE + sub_type] += size;
- } else if (type == FIXED_ARRAY_TYPE) {
- ASSERT(sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type]++;
- object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type] += size;
- }
- }
+ object_counts_[type]++;
+ object_sizes_[type] += size;
+ }
+
+ void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
+ ASSERT(code_sub_type < Code::NUMBER_OF_KINDS);
+ ASSERT(code_age < Code::kLastCodeAge);
+ object_counts_[FIRST_CODE_KIND_SUB_TYPE + code_sub_type]++;
+ object_sizes_[FIRST_CODE_KIND_SUB_TYPE + code_sub_type] += size;
+ object_counts_[FIRST_CODE_AGE_SUB_TYPE + code_age]++;
+ object_sizes_[FIRST_CODE_AGE_SUB_TYPE + code_age] += size;
+ }
+
+ void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
+ ASSERT(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
+ object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
+ object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
}
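
The split recorders above rely on the enum packing four kinds of statistics into one flat array: per-instance-type buckets first, then code-kind, fixed-array, and code-age sub-type buckets. A tiny sketch of the index arithmetic, with all constants as placeholders rather than V8's real enum values:

    #include <cstdio>

    int main() {
      // Placeholder values; V8 derives the real ones from its InstanceType
      // and Code enums.
      const int LAST_TYPE = 99;
      const int NUMBER_OF_CODE_KINDS = 16;
      const int LAST_FIXED_ARRAY_SUB_TYPE = 11;
      const int LAST_CODE_AGE = 6;

      const int FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1;
      const int FIRST_FIXED_ARRAY_SUB_TYPE =
          FIRST_CODE_KIND_SUB_TYPE + NUMBER_OF_CODE_KINDS;
      const int FIRST_CODE_AGE_SUB_TYPE =
          FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1;
      const int OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + LAST_CODE_AGE + 1;

      std::printf("code kind #3 -> bucket %d\n", FIRST_CODE_KIND_SUB_TYPE + 3);
      std::printf("code age  #2 -> bucket %d\n", FIRST_CODE_AGE_SUB_TYPE + 2);
      std::printf("array size   -> %d buckets\n", OBJECT_STATS_COUNT);
      return 0;
    }
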
void CheckpointObjectStats();
@@ -1887,6 +1863,16 @@ class Heap {
Heap* heap_;
};
+ MaybeObject* AddWeakObjectToCodeDependency(Object* obj, DependentCode* dep);
+
+ DependentCode* LookupWeakObjectToCodeDependency(Object* obj);
+
+ void InitializeWeakObjectToCodeTable() {
+ set_weak_object_to_code_table(undefined_value());
+ }
+
+ void EnsureWeakObjectToCodeTable();
+
private:
Heap();
@@ -1920,6 +1906,9 @@ class Heap {
bool flush_monomorphic_ics_;
+ // AllocationMementos found in new space.
+ int allocation_mementos_found_;
+
int scan_on_scavenge_pages_;
NewSpace new_space_;
@@ -1998,10 +1987,16 @@ class Heap {
bool old_gen_exhausted_;
// Weak list heads, threaded through the objects.
+  // List heads are initialized lazily and initially contain the undefined_value.
Object* native_contexts_list_;
Object* array_buffers_list_;
Object* allocation_sites_list_;
+ // WeakHashTable that maps objects embedded in optimized code to dependent
+  // code list. It is initialized lazily and initially contains the
+  // undefined_value.
+ Object* weak_object_to_code_table_;
+
StoreBufferRebuilder store_buffer_rebuilder_;
struct StringTypeTable {
@@ -2032,32 +2027,37 @@ class Heap {
// GC callback function, called before and after mark-compact GC.
// Allocations in the callback function are disallowed.
struct GCPrologueCallbackPair {
- GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
+ GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate)
+ : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
}
bool operator==(const GCPrologueCallbackPair& pair) const {
return pair.callback == callback;
}
- GCPrologueCallback callback;
+ v8::Isolate::GCPrologueCallback callback;
GCType gc_type;
+ // TODO(dcarney): remove variable
+ bool pass_isolate_;
};
List<GCPrologueCallbackPair> gc_prologue_callbacks_;
struct GCEpilogueCallbackPair {
- GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
+ GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate)
+ : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
}
bool operator==(const GCEpilogueCallbackPair& pair) const {
return pair.callback == callback;
}
- GCEpilogueCallback callback;
+ v8::Isolate::GCPrologueCallback callback;
GCType gc_type;
+ // TODO(dcarney): remove variable
+ bool pass_isolate_;
};
List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
- GCCallback global_gc_prologue_callback_;
- GCCallback global_gc_epilogue_callback_;
-
// Support for computing object sizes during GC.
HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object);
@@ -2080,17 +2080,28 @@ class Heap {
inline void UpdateOldSpaceLimits();
- // Allocate an uninitialized object in map space. The behavior is identical
- // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
- // have to test the allocation space argument and (b) can reduce code size
- // (since both AllocateRaw and AllocateRawMap are inlined).
- MUST_USE_RESULT inline MaybeObject* AllocateRawMap();
+ // Selects the proper allocation space depending on the given object
+ // size, pretenuring decision, and preferred old-space.
+ static AllocationSpace SelectSpace(int object_size,
+ AllocationSpace preferred_old_space,
+ PretenureFlag pretenure) {
+ ASSERT(preferred_old_space == OLD_POINTER_SPACE ||
+ preferred_old_space == OLD_DATA_SPACE);
+ if (object_size > Page::kMaxNonCodeHeapObjectSize) return LO_SPACE;
+ return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
+ }
+
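
SelectSpace() above encodes the allocation policy in two lines: anything larger than a page's non-code object limit goes to large-object space; otherwise tenured allocations go to the preferred old space and everything else to new space. A compilable restatement with stand-in enums and a placeholder size limit (the real constant is Page::kMaxNonCodeHeapObjectSize):

    #include <cstdio>

    enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE,
                           LO_SPACE };
    enum PretenureFlag { NOT_TENURED, TENURED };
    static const int kMaxNonCodeHeapObjectSize = 512 * 1024;  // Placeholder.

    AllocationSpace SelectSpace(int object_size,
                                AllocationSpace preferred_old_space,
                                PretenureFlag pretenure) {
      if (object_size > kMaxNonCodeHeapObjectSize) return LO_SPACE;  // Too big.
      return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
    }

    int main() {
      std::printf("%d\n", SelectSpace(64, OLD_DATA_SPACE, NOT_TENURED));  // 0
      std::printf("%d\n", SelectSpace(64, OLD_DATA_SPACE, TENURED));      // 2
      std::printf("%d\n", SelectSpace(1 << 20, OLD_DATA_SPACE, TENURED)); // 3
      return 0;
    }
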
+ // Allocate an uninitialized fixed array.
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(
+ int length, PretenureFlag pretenure);
- // Allocate an uninitialized object in the simple cell space.
- MUST_USE_RESULT inline MaybeObject* AllocateRawCell();
+ // Allocate an uninitialized fixed double array.
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
+ int length, PretenureFlag pretenure);
- // Allocate an uninitialized object in the global property cell space.
- MUST_USE_RESULT inline MaybeObject* AllocateRawPropertyCell();
+ // Allocate an initialized fixed array with the given filler value.
+ MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithFiller(
+ int length, PretenureFlag pretenure, Object* filler);
// Initializes a JSObject based on its map.
void InitializeJSObjectFromMap(JSObject* obj,
@@ -2116,10 +2127,6 @@ class Heap {
ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site);
-
// Allocate empty fixed array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
@@ -2130,6 +2137,16 @@ class Heap {
// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
+ // Allocate a tenured simple cell.
+ MUST_USE_RESULT MaybeObject* AllocateCell(Object* value);
+
+ // Allocate a tenured JS global property cell initialized with the hole.
+ MUST_USE_RESULT MaybeObject* AllocatePropertyCell();
+
+ // Allocate Box.
+ MUST_USE_RESULT MaybeObject* AllocateBox(Object* value,
+ PretenureFlag pretenure);
+
// Performs a minor collection in new generation.
void Scavenge();
@@ -2286,6 +2303,15 @@ class Heap {
void ClearObjectStats(bool clear_last_time_stats = false);
+ void set_weak_object_to_code_table(Object* value) {
+ ASSERT(!InNewSpace(value));
+ weak_object_to_code_table_ = value;
+ }
+
+ Object** weak_object_to_code_table_address() {
+ return &weak_object_to_code_table_;
+ }
+
static const int kInitialStringTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
@@ -2335,13 +2361,17 @@ class Heap {
unsigned int gc_count_at_last_idle_gc_;
int scavenges_since_last_idle_round_;
+  // These two counters are monotonically increasing and never reset.
+ size_t full_codegen_bytes_generated_;
+ size_t crankshaft_codegen_bytes_generated_;
+
// If the --deopt_every_n_garbage_collections flag is set to a positive value,
// this variable holds the number of garbage collections since the last
// deoptimization triggered by garbage collection.
int gcs_since_last_deopt_;
#ifdef VERIFY_HEAP
- int no_weak_embedded_maps_verification_scope_depth_;
+ int no_weak_object_verification_scope_depth_;
#endif
static const int kMaxMarkSweepsInIdleRound = 7;
@@ -2375,7 +2405,7 @@ class Heap {
friend class MarkCompactMarkingVisitor;
friend class MapCompact;
#ifdef VERIFY_HEAP
- friend class NoWeakEmbeddedMapsVerificationScope;
+ friend class NoWeakObjectVerificationScope;
#endif
DISALLOW_COPY_AND_ASSIGN(Heap);
@@ -2440,10 +2470,10 @@ class AlwaysAllocateScope {
};
#ifdef VERIFY_HEAP
-class NoWeakEmbeddedMapsVerificationScope {
+class NoWeakObjectVerificationScope {
public:
- inline NoWeakEmbeddedMapsVerificationScope();
- inline ~NoWeakEmbeddedMapsVerificationScope();
+ inline NoWeakObjectVerificationScope();
+ inline ~NoWeakObjectVerificationScope();
};
#endif
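
NoWeakObjectVerificationScope pairs with no_weak_object_verification_scope_depth_ above: constructing the scope bumps the depth, destruction restores it, and verification is enabled only at depth zero. A generic sketch of that RAII depth-counter pattern, with illustrative names:

    #include <cstdio>

    static int scope_depth = 0;  // Plays the heap's scope-depth member.

    struct NoVerificationScope {
      NoVerificationScope() { ++scope_depth; }   // Disable verification.
      ~NoVerificationScope() { --scope_depth; }  // Re-enable on scope exit.
    };

    static bool verification_enabled() { return scope_depth == 0; }

    int main() {
      std::printf("%d\n", verification_enabled());    // 1
      {
        NoVerificationScope scope;
        std::printf("%d\n", verification_enabled());  // 0
      }
      std::printf("%d\n", verification_enabled());    // 1
      return 0;
    }
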
diff --git a/deps/v8/src/hydrogen-alias-analysis.h b/deps/v8/src/hydrogen-alias-analysis.h
index 73e116e63e..21a54625ff 100644
--- a/deps/v8/src/hydrogen-alias-analysis.h
+++ b/deps/v8/src/hydrogen-alias-analysis.h
@@ -88,15 +88,6 @@ class HAliasAnalyzer : public ZoneObject {
inline bool NoAlias(HValue* a, HValue* b) {
return Query(a, b) == kNoAlias;
}
-
- // Returns the actual value of an instruction. In the case of a chain
- // of informative definitions, return the root of the chain.
- HValue* ActualValue(HValue* obj) {
- while (obj->IsInformativeDefinition()) { // Walk a chain of idefs.
- obj = obj->RedefinedOperand();
- }
- return obj;
- }
};
diff --git a/deps/v8/src/hydrogen-canonicalize.cc b/deps/v8/src/hydrogen-canonicalize.cc
index 4d96415e6a..d3f72e9339 100644
--- a/deps/v8/src/hydrogen-canonicalize.cc
+++ b/deps/v8/src/hydrogen-canonicalize.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "hydrogen-canonicalize.h"
+#include "hydrogen-redundant-phi.h"
namespace v8 {
namespace internal {
@@ -57,8 +58,15 @@ void HCanonicalizePhase::Run() {
}
}
}
+
// Perform actual Canonicalization pass.
+ HRedundantPhiEliminationPhase redundant_phi_eliminator(graph());
for (int i = 0; i < blocks->length(); ++i) {
+ // Eliminate redundant phis in the block first; changes to their inputs
+ // might have made them redundant, and eliminating them creates more
+ // opportunities for constant folding and strength reduction.
+ redundant_phi_eliminator.ProcessBlock(blocks->at(i));
+ // Now canonicalize each instruction.
for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
HValue* value = instr->Canonicalize();
diff --git a/deps/v8/src/hydrogen-check-elimination.cc b/deps/v8/src/hydrogen-check-elimination.cc
new file mode 100644
index 0000000000..f712a39db8
--- /dev/null
+++ b/deps/v8/src/hydrogen-check-elimination.cc
@@ -0,0 +1,357 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-check-elimination.h"
+#include "hydrogen-alias-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kMaxTrackedObjects = 10;
+typedef UniqueSet<Map>* MapSet;
+
+// The main data structure used during check elimination, which stores a
+// set of known maps for each object.
+class HCheckTable {
+ public:
+ explicit HCheckTable(Zone* zone) : zone_(zone) {
+ Kill();
+ redundant_ = 0;
+ narrowed_ = 0;
+ empty_ = 0;
+ removed_ = 0;
+ compares_true_ = 0;
+ compares_false_ = 0;
+ transitions_ = 0;
+ loads_ = 0;
+ }
+
+ void ReduceCheckMaps(HCheckMaps* instr) {
+ HValue* object = instr->value()->ActualValue();
+ int index = Find(object);
+ if (index >= 0) {
+      // Entry found.
+ MapSet a = known_maps_[index];
+ MapSet i = instr->map_set().Copy(zone_);
+ if (a->IsSubset(i)) {
+ // The first check is more strict; the second is redundant.
+ if (checks_[index] != NULL) {
+ instr->DeleteAndReplaceWith(checks_[index]);
+ redundant_++;
+ } else {
+ instr->DeleteAndReplaceWith(instr->value());
+ removed_++;
+ }
+ return;
+ }
+ i = i->Intersect(a, zone_);
+ if (i->size() == 0) {
+ // Intersection is empty; probably megamorphic, which is likely to
+ // deopt anyway, so just leave things as they are.
+ empty_++;
+ } else {
+ // TODO(titzer): replace the first check with a more strict check.
+ narrowed_++;
+ }
+ } else {
+ // No entry; insert a new one.
+ Insert(object, instr, instr->map_set().Copy(zone_));
+ }
+ }
+
+ void ReduceCheckValue(HCheckValue* instr) {
+ // Canonicalize HCheckValues; they might have their values load-eliminated.
+ HValue* value = instr->Canonicalize();
+ if (value == NULL) {
+ instr->DeleteAndReplaceWith(instr->value());
+ removed_++;
+ } else if (value != instr) {
+ instr->DeleteAndReplaceWith(value);
+ redundant_++;
+ }
+ }
+
+ void ReduceLoadNamedField(HLoadNamedField* instr) {
+ // Reduce a load of the map field when it is known to be a constant.
+ if (!IsMapAccess(instr->access())) return;
+
+ HValue* object = instr->object()->ActualValue();
+ MapSet maps = FindMaps(object);
+ if (maps == NULL || maps->size() != 1) return; // Not a constant.
+
+ Unique<Map> map = maps->at(0);
+ HConstant* constant = HConstant::CreateAndInsertBefore(
+ instr->block()->graph()->zone(), map, true, instr);
+ instr->DeleteAndReplaceWith(constant);
+ loads_++;
+ }
+
+ void ReduceCheckMapValue(HCheckMapValue* instr) {
+ if (!instr->map()->IsConstant()) return; // Nothing to learn.
+
+ HValue* object = instr->value()->ActualValue();
+    // Match an HCheckMapValue(object, HConstant(map)).
+ Unique<Map> map = MapConstant(instr->map());
+ MapSet maps = FindMaps(object);
+ if (maps != NULL) {
+ if (maps->Contains(map)) {
+ if (maps->size() == 1) {
+ // Object is known to have exactly this map.
+ instr->DeleteAndReplaceWith(NULL);
+ removed_++;
+ } else {
+ // Only one map survives the check.
+ maps->Clear();
+ maps->Add(map, zone_);
+ }
+ }
+ } else {
+ // No prior information.
+ Insert(object, map);
+ }
+ }
+
+ void ReduceStoreNamedField(HStoreNamedField* instr) {
+ HValue* object = instr->object()->ActualValue();
+ if (instr->has_transition()) {
+ // This store transitions the object to a new map.
+ Kill(object);
+ Insert(object, MapConstant(instr->transition()));
+ } else if (IsMapAccess(instr->access())) {
+ // This is a store directly to the map field of the object.
+ Kill(object);
+ if (!instr->value()->IsConstant()) return;
+ Insert(object, MapConstant(instr->value()));
+ } else if (instr->CheckGVNFlag(kChangesMaps)) {
+ // This store indirectly changes the map of the object.
+ Kill(instr->object());
+ UNREACHABLE();
+ }
+ }
+
+ void ReduceCompareMap(HCompareMap* instr) {
+ MapSet maps = FindMaps(instr->value()->ActualValue());
+ if (maps == NULL) return;
+ if (maps->Contains(instr->map())) {
+ // TODO(titzer): replace with goto true branch
+ if (maps->size() == 1) compares_true_++;
+ } else {
+ // TODO(titzer): replace with goto false branch
+ compares_false_++;
+ }
+ }
+
+ void ReduceTransitionElementsKind(HTransitionElementsKind* instr) {
+ MapSet maps = FindMaps(instr->object()->ActualValue());
+ // Can only learn more about an object that already has a known set of maps.
+ if (maps == NULL) return;
+ if (maps->Contains(instr->original_map())) {
+ // If the object has the original map, it will be transitioned.
+ maps->Remove(instr->original_map());
+ maps->Add(instr->transitioned_map(), zone_);
+ } else {
+ // Object does not have the given map, thus the transition is redundant.
+ instr->DeleteAndReplaceWith(instr->object());
+ transitions_++;
+ }
+ }
+
+ // Kill everything in the table.
+ void Kill() {
+ memset(objects_, 0, sizeof(objects_));
+ }
+
+ // Kill everything in the table that may alias {object}.
+ void Kill(HValue* object) {
+ for (int i = 0; i < kMaxTrackedObjects; i++) {
+ if (objects_[i] == NULL) continue;
+ if (aliasing_.MayAlias(objects_[i], object)) objects_[i] = NULL;
+ }
+ ASSERT(Find(object) < 0);
+ }
+
+ void Print() {
+ for (int i = 0; i < kMaxTrackedObjects; i++) {
+ if (objects_[i] == NULL) continue;
+ PrintF(" checkmaps-table @%d: object #%d ", i, objects_[i]->id());
+ if (checks_[i] != NULL) {
+ PrintF("check #%d ", checks_[i]->id());
+ }
+ MapSet list = known_maps_[i];
+ PrintF("%d maps { ", list->size());
+ for (int j = 0; j < list->size(); j++) {
+ if (j > 0) PrintF(", ");
+ PrintF("%" V8PRIxPTR, list->at(j).Hashcode());
+ }
+ PrintF(" }\n");
+ }
+ }
+
+ void PrintStats() {
+ if (redundant_ > 0) PrintF(" redundant = %2d\n", redundant_);
+ if (removed_ > 0) PrintF(" removed = %2d\n", removed_);
+ if (narrowed_ > 0) PrintF(" narrowed = %2d\n", narrowed_);
+ if (loads_ > 0) PrintF(" loads = %2d\n", loads_);
+ if (empty_ > 0) PrintF(" empty = %2d\n", empty_);
+ if (compares_true_ > 0) PrintF(" cmp_true = %2d\n", compares_true_);
+ if (compares_false_ > 0) PrintF(" cmp_false = %2d\n", compares_false_);
+ if (transitions_ > 0) PrintF(" transitions = %2d\n", transitions_);
+ }
+
+ private:
+ int Find(HValue* object) {
+ for (int i = 0; i < kMaxTrackedObjects; i++) {
+ if (objects_[i] == NULL) continue;
+ if (aliasing_.MustAlias(objects_[i], object)) return i;
+ }
+ return -1;
+ }
+
+ MapSet FindMaps(HValue* object) {
+ int index = Find(object);
+ return index < 0 ? NULL : known_maps_[index];
+ }
+
+ void Insert(HValue* object, Unique<Map> map) {
+ MapSet list = new(zone_) UniqueSet<Map>();
+ list->Add(map, zone_);
+ Insert(object, NULL, list);
+ }
+
+ void Insert(HValue* object, HCheckMaps* check, MapSet maps) {
+ for (int i = 0; i < kMaxTrackedObjects; i++) {
+ // TODO(titzer): drop old entries instead of disallowing new ones.
+ if (objects_[i] == NULL) {
+ objects_[i] = object;
+ checks_[i] = check;
+ known_maps_[i] = maps;
+ return;
+ }
+ }
+ }
+
+ bool IsMapAccess(HObjectAccess access) {
+ return access.IsInobject() && access.offset() == JSObject::kMapOffset;
+ }
+
+ Unique<Map> MapConstant(HValue* value) {
+ return Unique<Map>::cast(HConstant::cast(value)->GetUnique());
+ }
+
+ Zone* zone_;
+ HValue* objects_[kMaxTrackedObjects];
+ HValue* checks_[kMaxTrackedObjects];
+ MapSet known_maps_[kMaxTrackedObjects];
+ HAliasAnalyzer aliasing_;
+ int redundant_;
+ int removed_;
+ int narrowed_;
+ int loads_;
+ int empty_;
+ int compares_true_;
+ int compares_false_;
+ int transitions_;
+};
+
+
+void HCheckEliminationPhase::Run() {
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ EliminateLocalChecks(graph()->blocks()->at(i));
+ }
+}
+
+
+// For code de-uglification.
+#define TRACE(x) if (FLAG_trace_check_elimination) PrintF x
+
+
+// Eliminate checks local to a block.
+void HCheckEliminationPhase::EliminateLocalChecks(HBasicBlock* block) {
+ HCheckTable table(zone());
+ TRACE(("-- check-elim B%d ------------------------------------------------\n",
+ block->block_id()));
+
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ bool changed = false;
+ HInstruction* instr = it.Current();
+
+ switch (instr->opcode()) {
+ case HValue::kCheckMaps: {
+ table.ReduceCheckMaps(HCheckMaps::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kCheckValue: {
+ table.ReduceCheckValue(HCheckValue::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kLoadNamedField: {
+ table.ReduceLoadNamedField(HLoadNamedField::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kStoreNamedField: {
+ table.ReduceStoreNamedField(HStoreNamedField::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kCompareMap: {
+ table.ReduceCompareMap(HCompareMap::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kTransitionElementsKind: {
+ table.ReduceTransitionElementsKind(
+ HTransitionElementsKind::cast(instr));
+ changed = true;
+ break;
+ }
+ case HValue::kCheckMapValue: {
+ table.ReduceCheckMapValue(HCheckMapValue::cast(instr));
+ changed = true;
+ break;
+ }
+ default: {
+ // If the instruction changes maps uncontrollably, kill the whole town.
+ if (instr->CheckGVNFlag(kChangesMaps)) {
+ table.Kill();
+ changed = true;
+ }
+ }
+ // Improvements possible:
+ // - eliminate HCheckSmi and HCheckHeapObject
+ }
+
+ if (changed && FLAG_trace_check_elimination) table.Print();
+ }
+
+ if (FLAG_trace_check_elimination) table.PrintStats();
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/marking-thread.h b/deps/v8/src/hydrogen-check-elimination.h
index 021cd5b48c..fa01964f6f 100644
--- a/deps/v8/src/marking-thread.h
+++ b/deps/v8/src/hydrogen-check-elimination.h
@@ -25,42 +25,28 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_MARKING_THREAD_H_
-#define V8_MARKING_THREAD_H_
+#ifndef V8_HYDROGEN_CHECK_ELIMINATION_H_
+#define V8_HYDROGEN_CHECK_ELIMINATION_H_
-#include "atomicops.h"
-#include "flags.h"
-#include "platform.h"
-#include "v8utils.h"
-
-#include "spaces.h"
-
-#include "heap.h"
+#include "hydrogen.h"
namespace v8 {
namespace internal {
-class MarkingThread : public Thread {
+
+// Remove CheckMaps instructions through flow- and branch-sensitive analysis.
+class HCheckEliminationPhase : public HPhase {
public:
- explicit MarkingThread(Isolate* isolate);
- ~MarkingThread() {}
+ explicit HCheckEliminationPhase(HGraph* graph)
+ : HPhase("H_Check Elimination", graph) { }
void Run();
- void Stop();
- void StartMarking();
- void WaitForMarkingThread();
private:
- Isolate* isolate_;
- Heap* heap_;
- Semaphore start_marking_semaphore_;
- Semaphore end_marking_semaphore_;
- Semaphore stop_semaphore_;
- volatile AtomicWord stop_thread_;
- int id_;
- static Atomic32 id_counter_;
+ void EliminateLocalChecks(HBasicBlock* block);
};
+
} } // namespace v8::internal
-#endif // V8_MARKING_THREAD_H_
+#endif // V8_HYDROGEN_CHECK_ELIMINATION_H_
diff --git a/deps/v8/src/hydrogen-dce.cc b/deps/v8/src/hydrogen-dce.cc
index 0e7253d5a4..e101ee5bcc 100644
--- a/deps/v8/src/hydrogen-dce.cc
+++ b/deps/v8/src/hydrogen-dce.cc
@@ -31,56 +31,60 @@
namespace v8 {
namespace internal {
-bool HDeadCodeEliminationPhase::MarkLive(HValue* ref, HValue* instr) {
- if (instr->CheckFlag(HValue::kIsLive)) return false;
- instr->SetFlag(HValue::kIsLive);
-
- if (FLAG_trace_dead_code_elimination) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- if (ref != NULL) {
- ref->PrintTo(&stream);
- } else {
- stream.Add("root ");
+void HDeadCodeEliminationPhase::MarkLive(
+ HValue* instr, ZoneList<HValue*>* worklist) {
+ if (instr->CheckFlag(HValue::kIsLive)) return; // Already live.
+
+ if (FLAG_trace_dead_code_elimination) PrintLive(NULL, instr);
+
+ // Transitively mark all inputs of live instructions live.
+ worklist->Add(instr, zone());
+ while (!worklist->is_empty()) {
+ HValue* instr = worklist->RemoveLast();
+ instr->SetFlag(HValue::kIsLive);
+ for (int i = 0; i < instr->OperandCount(); ++i) {
+ HValue* input = instr->OperandAt(i);
+ if (!input->CheckFlag(HValue::kIsLive)) {
+ input->SetFlag(HValue::kIsLive);
+ worklist->Add(input, zone());
+ if (FLAG_trace_dead_code_elimination) PrintLive(instr, input);
+ }
}
- stream.Add(" -> ");
- instr->PrintTo(&stream);
- PrintF("[MarkLive %s]\n", *stream.ToCString());
}
+}
+
- return true;
+void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ if (ref != NULL) {
+ ref->PrintTo(&stream);
+ } else {
+ stream.Add("root ");
+ }
+ stream.Add(" -> ");
+ instr->PrintTo(&stream);
+ PrintF("[MarkLive %s]\n", *stream.ToCString());
}
void HDeadCodeEliminationPhase::MarkLiveInstructions() {
- ZoneList<HValue*> worklist(graph()->blocks()->length(), zone());
+ ZoneList<HValue*> worklist(10, zone());
- // Mark initial root instructions for dead code elimination.
+ // Transitively mark all live instructions, starting from roots.
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- if (instr->CannotBeEliminated() && MarkLive(NULL, instr)) {
- worklist.Add(instr, zone());
- }
+ if (instr->CannotBeEliminated()) MarkLive(instr, &worklist);
}
for (int j = 0; j < block->phis()->length(); j++) {
HPhi* phi = block->phis()->at(j);
- if (phi->CannotBeEliminated() && MarkLive(NULL, phi)) {
- worklist.Add(phi, zone());
- }
+ if (phi->CannotBeEliminated()) MarkLive(phi, &worklist);
}
}
- // Transitively mark all inputs of live instructions live.
- while (!worklist.is_empty()) {
- HValue* instr = worklist.RemoveLast();
- for (int i = 0; i < instr->OperandCount(); ++i) {
- if (MarkLive(instr, instr->OperandAt(i))) {
- worklist.Add(instr->OperandAt(i), zone());
- }
- }
- }
+ ASSERT(worklist.is_empty()); // Should have processed everything.
}
@@ -93,10 +97,8 @@ void HDeadCodeEliminationPhase::RemoveDeadInstructions() {
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
if (!instr->CheckFlag(HValue::kIsLive)) {
- // Instruction has not been marked live; assume it is dead and remove.
- // TODO(titzer): we don't remove constants because some special ones
- // might be used by later phases and are assumed to be in the graph
- if (!instr->IsConstant()) instr->DeleteAndReplaceWith(NULL);
+ // Instruction has not been marked live, so remove it.
+ instr->DeleteAndReplaceWith(NULL);
} else {
// Clear the liveness flag to leave the graph clean for the next DCE.
instr->ClearFlag(HValue::kIsLive);
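
The rewritten MarkLive() above replaces per-edge marking with one explicit worklist: each root is marked and pushed, then operands are transitively marked until the worklist drains. A self-contained sketch of the same marking loop on a toy graph, where Node stands in for HValue and the live bit for HValue::kIsLive:

    #include <cstdio>
    #include <vector>

    struct Node {
      bool live = false;            // Plays HValue::kIsLive.
      std::vector<Node*> operands;  // Plays OperandAt(i).
    };

    void MarkLive(Node* root, std::vector<Node*>* worklist) {
      if (root->live) return;  // Already live.
      root->live = true;
      worklist->push_back(root);
      while (!worklist->empty()) {
        Node* n = worklist->back();
        worklist->pop_back();
        for (Node* input : n->operands) {
          if (!input->live) {  // Transitively mark inputs live.
            input->live = true;
            worklist->push_back(input);
          }
        }
      }
    }

    int main() {
      Node a, b, c;  // a is a root using b; c is unreachable, hence dead.
      a.operands.push_back(&b);
      std::vector<Node*> worklist;
      MarkLive(&a, &worklist);
      std::printf("a=%d b=%d c=%d\n", a.live, b.live, c.live);  // a=1 b=1 c=0
      return 0;
    }
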
diff --git a/deps/v8/src/hydrogen-dce.h b/deps/v8/src/hydrogen-dce.h
index 19749f279a..2d73b380e4 100644
--- a/deps/v8/src/hydrogen-dce.h
+++ b/deps/v8/src/hydrogen-dce.h
@@ -45,7 +45,8 @@ class HDeadCodeEliminationPhase : public HPhase {
}
private:
- bool MarkLive(HValue* ref, HValue* instr);
+ void MarkLive(HValue* instr, ZoneList<HValue*>* worklist);
+ void PrintLive(HValue* ref, HValue* instr);
void MarkLiveInstructions();
void RemoveDeadInstructions();
};
diff --git a/deps/v8/src/hydrogen-deoptimizing-mark.cc b/deps/v8/src/hydrogen-deoptimizing-mark.cc
deleted file mode 100644
index 626848e012..0000000000
--- a/deps/v8/src/hydrogen-deoptimizing-mark.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-deoptimizing-mark.h"
-
-namespace v8 {
-namespace internal {
-
-void HPropagateDeoptimizingMarkPhase::MarkAsDeoptimizing() {
- HBasicBlock* block = graph()->entry_block();
- ZoneList<HBasicBlock*> stack(graph()->blocks()->length(), zone());
- while (block != NULL) {
- const ZoneList<HBasicBlock*>* dominated_blocks(block->dominated_blocks());
- if (!dominated_blocks->is_empty()) {
- if (block->IsDeoptimizing()) {
- for (int i = 0; i < dominated_blocks->length(); ++i) {
- dominated_blocks->at(i)->MarkAsDeoptimizing();
- }
- }
- for (int i = 1; i < dominated_blocks->length(); ++i) {
- stack.Add(dominated_blocks->at(i), zone());
- }
- block = dominated_blocks->at(0);
- } else if (!stack.is_empty()) {
- // Pop next block from stack.
- block = stack.RemoveLast();
- } else {
- // All blocks processed.
- block = NULL;
- }
- }
-}
-
-
-void HPropagateDeoptimizingMarkPhase::NullifyUnreachableInstructions() {
- if (!FLAG_unreachable_code_elimination) return;
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- bool nullify = false;
- const ZoneList<HBasicBlock*>* predecessors = block->predecessors();
- int predecessors_length = predecessors->length();
- bool all_predecessors_deoptimizing = (predecessors_length > 0);
- for (int j = 0; j < predecessors_length; ++j) {
- if (!predecessors->at(j)->IsDeoptimizing()) {
- all_predecessors_deoptimizing = false;
- break;
- }
- }
- if (all_predecessors_deoptimizing) nullify = true;
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- // Leave the basic structure of the graph intact.
- if (instr->IsBlockEntry()) continue;
- if (instr->IsControlInstruction()) continue;
- if (instr->IsSimulate()) continue;
- if (instr->IsEnterInlined()) continue;
- if (instr->IsLeaveInlined()) continue;
- if (nullify) {
- HInstruction* last_dummy = NULL;
- for (int j = 0; j < instr->OperandCount(); ++j) {
- HValue* operand = instr->OperandAt(j);
- // Insert an HDummyUse for each operand, unless the operand
- // is an HDummyUse itself. If it's even from the same block,
- // remember it as a potential replacement for the instruction.
- if (operand->IsDummyUse()) {
- if (operand->block() == instr->block() &&
- last_dummy == NULL) {
- last_dummy = HInstruction::cast(operand);
- }
- continue;
- }
- if (operand->IsControlInstruction()) {
- // Inserting a dummy use for a value that's not defined anywhere
- // will fail. Some instructions define fake inputs on such
- // values as control flow dependencies.
- continue;
- }
- HDummyUse* dummy = new(graph()->zone()) HDummyUse(operand);
- dummy->InsertBefore(instr);
- last_dummy = dummy;
- }
- if (last_dummy == NULL) last_dummy = graph()->GetConstant1();
- instr->DeleteAndReplaceWith(last_dummy);
- continue;
- }
- if (instr->IsDeoptimize()) {
- ASSERT(block->IsDeoptimizing());
- nullify = true;
- }
- }
- }
-}
-
-
-void HPropagateDeoptimizingMarkPhase::Run() {
- // Skip this phase if there is nothing to be done anyway.
- if (!graph()->has_soft_deoptimize()) return;
- MarkAsDeoptimizing();
- NullifyUnreachableInstructions();
-}
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-escape-analysis.cc b/deps/v8/src/hydrogen-escape-analysis.cc
index 997e4f9445..1023019923 100644
--- a/deps/v8/src/hydrogen-escape-analysis.cc
+++ b/deps/v8/src/hydrogen-escape-analysis.cc
@@ -154,9 +154,8 @@ HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
HValue* value = state->map_value();
// TODO(mstarzinger): This will narrow a map check against a set of maps
// down to the first element in the set. Revisit and fix this.
- Handle<Map> map_object = mapcheck->map_set()->first();
- UniqueValueId map_id = mapcheck->map_unique_ids()->first();
- HCheckValue* check = HCheckValue::New(zone, NULL, value, map_object, map_id);
+ HCheckValue* check = HCheckValue::New(
+ zone, NULL, value, mapcheck->first_map(), false);
check->InsertBefore(mapcheck);
return check;
}
@@ -307,7 +306,7 @@ void HEscapeAnalysisPhase::PerformScalarReplacement() {
number_of_objects_++;
block_states_.Clear();
- // Perform actual analysis steps.
+ // Perform actual analysis step.
AnalyzeDataFlow(allocate);
cumulative_values_ += number_of_values_;
@@ -321,8 +320,13 @@ void HEscapeAnalysisPhase::Run() {
// TODO(mstarzinger): We disable escape analysis with OSR for now, because
// spill slots might be uninitialized. Needs investigation.
if (graph()->has_osr()) return;
- CollectCapturedValues();
- PerformScalarReplacement();
+ int max_fixpoint_iteration_count = FLAG_escape_analysis_iterations;
+ for (int i = 0; i < max_fixpoint_iteration_count; i++) {
+ CollectCapturedValues();
+ if (captured_.is_empty()) break;
+ PerformScalarReplacement();
+ captured_.Clear();
+ }
}
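
Run() now iterates collect-then-replace until no captured allocations remain or the flag-controlled budget is spent, since replacing one allocation can expose another. The bounded-fixpoint shape, reduced to a toy:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> captured;      // Pretend ids of captured allocations.
      captured.push_back(3);
      captured.push_back(2);
      captured.push_back(1);
      const int max_iterations = 10;  // Plays FLAG_escape_analysis_iterations.
      int rounds = 0;
      for (int i = 0; i < max_iterations; ++i) {
        if (captured.empty()) break;  // Fixpoint: nothing left to replace.
        captured.pop_back();          // "Scalar-replace" one candidate, which
        ++rounds;                     // in V8 may expose further candidates.
      }
      std::printf("converged after %d rounds\n", rounds);  // 3
      return 0;
    }
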
diff --git a/deps/v8/src/hydrogen-flow-engine.h b/deps/v8/src/hydrogen-flow-engine.h
new file mode 100644
index 0000000000..dfe43ec6c3
--- /dev/null
+++ b/deps/v8/src/hydrogen-flow-engine.h
@@ -0,0 +1,235 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_FLOW_ENGINE_H_
+#define V8_HYDROGEN_FLOW_ENGINE_H_
+
+#include "hydrogen.h"
+#include "hydrogen-instructions.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// An example implementation of effects that doesn't collect anything.
+class NoEffects : public ZoneObject {
+ public:
+ explicit NoEffects(Zone* zone) { }
+
+ inline bool Disabled() {
+ return true; // Nothing to do.
+ }
+ template <class State>
+ inline void Apply(State* state) {
+ // do nothing.
+ }
+ inline void Process(HInstruction* value, Zone* zone) {
+ // do nothing.
+ }
+ inline void Union(NoEffects* other, Zone* zone) {
+ // do nothing.
+ }
+};
+
+
+// An example implementation of state that doesn't track anything.
+class NoState {
+ public:
+ inline NoState* Copy(HBasicBlock* succ, Zone* zone) {
+ return this;
+ }
+ inline NoState* Process(HInstruction* value, Zone* zone) {
+ return this;
+ }
+ inline NoState* Merge(HBasicBlock* succ, NoState* other, Zone* zone) {
+ return this;
+ }
+};
+
+
+// This class implements an engine that can drive flow-sensitive analyses
+// over a graph of basic blocks, either one block at a time (local analysis)
+// or over the entire graph (global analysis). The flow engine is parameterized
+// by the type of the state and the effects collected while walking over the
+// graph.
+//
+// The "State" collects which facts are known while passing over instructions
+// in control flow order, and the "Effects" collect summary information about
+// which facts could be invalidated on other control flow paths. The effects
+// are necessary to correctly handle loops in the control flow graph without
+// doing a fixed-point iteration. Thus the flow engine is guaranteed to visit
+// each block at most twice: once for state, and optionally once for effects.
+//
+// The flow engine requires the State and Effects classes to implement methods
+// like the example NoState and NoEffects above. It's not necessary to provide
+// an effects implementation for local analysis.
+template <class State, class Effects>
+class HFlowEngine {
+ public:
+ HFlowEngine(HGraph* graph, Zone* zone)
+ : graph_(graph),
+ zone_(zone),
+#if DEBUG
+ pred_counts_(graph->blocks()->length(), zone),
+#endif
+ block_states_(graph->blocks()->length(), zone),
+ loop_effects_(graph->blocks()->length(), zone) {
+ loop_effects_.AddBlock(NULL, graph_->blocks()->length(), zone);
+ }
+
+ // Local analysis. Iterates over the instructions in the given block.
+ State* AnalyzeOneBlock(HBasicBlock* block, State* state) {
+ // Go through all instructions of the current block, updating the state.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ state = state->Process(it.Current(), zone_);
+ }
+ return state;
+ }
+
+ // Global analysis. Iterates over all blocks that are dominated by the given
+ // block, starting with the initial state. Computes effects for nested loops.
+ void AnalyzeDominatedBlocks(HBasicBlock* root, State* initial) {
+ InitializeStates();
+ SetStateAt(root, initial);
+
+ // Iterate all dominated blocks starting from the given start block.
+ for (int i = root->block_id(); i < graph_->blocks()->length(); i++) {
+ HBasicBlock* block = graph_->blocks()->at(i);
+
+ // Skip blocks not dominated by the root node.
+ if (SkipNonDominatedBlock(root, block)) continue;
+ State* state = StateAt(block);
+
+ if (block->IsLoopHeader()) {
+ // Apply loop effects before analyzing loop body.
+ ComputeLoopEffects(block)->Apply(state);
+ } else {
+ // Must have visited all predecessors before this block.
+ CheckPredecessorCount(block);
+ }
+
+ // Go through all instructions of the current block, updating the state.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ state = state->Process(it.Current(), zone_);
+ }
+
+ // Propagate the block state forward to all successor blocks.
+ for (int i = 0; i < block->end()->SuccessorCount(); i++) {
+ HBasicBlock* succ = block->end()->SuccessorAt(i);
+ IncrementPredecessorCount(succ);
+ if (StateAt(succ) == NULL) {
+ // This is the first state to reach the successor.
+ SetStateAt(succ, state->Copy(succ, zone_));
+ } else {
+ // Merge the current state with the state already at the successor.
+ SetStateAt(succ, state->Merge(succ, StateAt(succ), zone_));
+ }
+ }
+ }
+ }
+
+ private:
+ // Computes and caches the loop effects for the loop which has the given
+ // block as its loop header.
+ Effects* ComputeLoopEffects(HBasicBlock* block) {
+ ASSERT(block->IsLoopHeader());
+ Effects* effects = loop_effects_[block->block_id()];
+ if (effects != NULL) return effects; // Already analyzed this loop.
+
+ effects = new(zone_) Effects(zone_);
+ loop_effects_[block->block_id()] = effects;
+ if (effects->Disabled()) return effects; // No effects for this analysis.
+
+ HLoopInformation* loop = block->loop_information();
+ int end = loop->GetLastBackEdge()->block_id();
+ // Process the blocks between the header and the end.
+ for (int i = block->block_id(); i <= end; i++) {
+ HBasicBlock* member = graph_->blocks()->at(i);
+ if (i != block->block_id() && member->IsLoopHeader()) {
+ // Recursively compute and cache the effects of the nested loop.
+ ASSERT(member->loop_information()->parent_loop() == loop);
+ Effects* nested = ComputeLoopEffects(member);
+ effects->Union(nested, zone_);
+ // Skip the nested loop's blocks.
+ i = member->loop_information()->GetLastBackEdge()->block_id();
+ } else {
+ // Process all the effects of the block.
+ ASSERT(member->current_loop() == loop);
+ for (HInstructionIterator it(member); !it.Done(); it.Advance()) {
+ effects->Process(it.Current(), zone_);
+ }
+ }
+ }
+ return effects;
+ }
+
+ inline bool SkipNonDominatedBlock(HBasicBlock* root, HBasicBlock* other) {
+ if (root->block_id() == 0) return false; // Visit the whole graph.
+ if (root == other) return false; // Always visit the root.
+ return !root->Dominates(other); // Only visit dominated blocks.
+ }
+
+ inline State* StateAt(HBasicBlock* block) {
+ return block_states_.at(block->block_id());
+ }
+
+ inline void SetStateAt(HBasicBlock* block, State* state) {
+ block_states_.Set(block->block_id(), state);
+ }
+
+ inline void InitializeStates() {
+#if DEBUG
+ pred_counts_.Rewind(0);
+ pred_counts_.AddBlock(0, graph_->blocks()->length(), zone_);
+#endif
+ block_states_.Rewind(0);
+ block_states_.AddBlock(NULL, graph_->blocks()->length(), zone_);
+ }
+
+ inline void CheckPredecessorCount(HBasicBlock* block) {
+ ASSERT(block->predecessors()->length() == pred_counts_[block->block_id()]);
+ }
+
+ inline void IncrementPredecessorCount(HBasicBlock* block) {
+#if DEBUG
+ pred_counts_[block->block_id()]++;
+#endif
+ }
+
+ HGraph* graph_; // The hydrogen graph.
+ Zone* zone_; // Temporary zone.
+#if DEBUG
+ ZoneList<int> pred_counts_; // Finished predecessors (by block id).
+#endif
+ ZoneList<State*> block_states_; // Block states (by block id).
+ ZoneList<Effects*> loop_effects_; // Loop effects (by block id).
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_FLOW_ENGINE_H_
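
The State contract the engine relies on is small: Process() consumes one instruction and returns the (possibly new) state, which is exactly what AnalyzeOneBlock() iterates. A compilable toy of that contract outside V8 — Instr and SumState are made up, and loop effects and merging are elided:

    #include <cstdio>
    #include <vector>

    struct Instr { int delta; };  // Toy instruction; plays HInstruction.

    // Plays the State role for the engine's per-instruction loop.
    struct SumState {
      int total;
      SumState() : total(0) {}
      SumState* Process(Instr* instr) {
        total += instr->delta;
        return this;
      }
    };

    template <class State>
    State* AnalyzeOneBlock(const std::vector<Instr*>& block, State* state) {
      for (size_t i = 0; i < block.size(); ++i) {
        state = state->Process(block[i]);
      }
      return state;
    }

    int main() {
      Instr a = {1}, b = {2}, c = {3};
      std::vector<Instr*> block;
      block.push_back(&a);
      block.push_back(&b);
      block.push_back(&c);
      SumState state;
      std::printf("total = %d\n", AnalyzeOneBlock(block, &state)->total);  // 6
      return 0;
    }
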
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
index 9a02a1dcf4..e3bf316f37 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -396,30 +396,27 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
// Compute side effects for the block.
HBasicBlock* block = graph()->blocks()->at(i);
- int id = block->block_id();
GVNFlagSet side_effects;
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- side_effects.Add(instr->ChangesFlags());
- if (instr->IsDeoptimize()) {
- block_side_effects_[id].RemoveAll();
- side_effects.RemoveAll();
- break;
+ if (block->IsReachable() && !block->IsDeoptimizing()) {
+ int id = block->block_id();
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ side_effects.Add(instr->ChangesFlags());
}
- }
- block_side_effects_[id].Add(side_effects);
+ block_side_effects_[id].Add(side_effects);
- // Loop headers are part of their loop.
- if (block->IsLoopHeader()) {
- loop_side_effects_[id].Add(side_effects);
- }
+ // Loop headers are part of their loop.
+ if (block->IsLoopHeader()) {
+ loop_side_effects_[id].Add(side_effects);
+ }
- // Propagate loop side effects upwards.
- if (block->HasParentLoopHeader()) {
- int header_id = block->parent_loop_header()->block_id();
- loop_side_effects_[header_id].Add(block->IsLoopHeader()
- ? loop_side_effects_[id]
- : side_effects);
+ // Propagate loop side effects upwards.
+ if (block->HasParentLoopHeader()) {
+ int header_id = block->parent_loop_header()->block_id();
+ loop_side_effects_[header_id].Add(block->IsLoopHeader()
+ ? loop_side_effects_[id]
+ : side_effects);
+ }
}
}
}
@@ -609,7 +606,8 @@ bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
HBasicBlock* loop_header) {
// If we've disabled code motion or we're in a block that unconditionally
// deoptimizes, don't move any instructions.
- return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
+ return AllowCodeMotion() && !instr->block()->IsDeoptimizing() &&
+ instr->block()->IsReachable();
}
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index cca95b9b5f..206ab7e2ac 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -509,6 +509,17 @@ const char* HValue::Mnemonic() const {
}
+bool HValue::CanReplaceWithDummyUses() {
+ return FLAG_unreachable_code_elimination &&
+ !(block()->IsReachable() ||
+ IsBlockEntry() ||
+ IsControlInstruction() ||
+ IsSimulate() ||
+ IsEnterInlined() ||
+ IsLeaveInlined());
+}
+
+
bool HValue::IsInteger32Constant() {
return IsConstant() && HConstant::cast(this)->HasInteger32Value();
}
@@ -730,6 +741,10 @@ void HInstruction::InsertBefore(HInstruction* next) {
next_ = next;
previous_ = prev;
SetBlock(next->block());
+ if (position() == RelocInfo::kNoPosition &&
+ next->position() != RelocInfo::kNoPosition) {
+ set_position(next->position());
+ }
}
@@ -764,6 +779,10 @@ void HInstruction::InsertAfter(HInstruction* previous) {
if (block->last() == previous) {
block->set_last(this);
}
+ if (position() == RelocInfo::kNoPosition &&
+ previous->position() != RelocInfo::kNoPosition) {
+ set_position(previous->position());
+ }
}
@@ -973,6 +992,9 @@ void HCallNewArray::PrintDataTo(StringStream* stream) {
void HCallRuntime::PrintDataTo(StringStream* stream) {
stream->Add("%o ", *name());
+ if (save_doubles() == kSaveFPRegs) {
+ stream->Add("[save doubles] ");
+ }
stream->Add("#%d", argument_count());
}
@@ -1050,9 +1072,24 @@ Representation HBranch::observed_input_representation(int index) {
}
+bool HBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ HValue* value = this->value();
+ if (value->EmitAtUses()) {
+ ASSERT(value->IsConstant());
+ ASSERT(!value->representation().IsDouble());
+ *block = HConstant::cast(value)->BooleanValue()
+ ? FirstSuccessor()
+ : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" (%p)", *map());
+ stream->Add(" (%p)", *map().handle());
HControlInstruction::PrintDataTo(stream);
}
@@ -1218,8 +1255,15 @@ static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
HValue* HAdd::Canonicalize() {
- if (IsIdentityOperation(left(), right(), 0)) return left();
- if (IsIdentityOperation(right(), left(), 0)) return right();
+ // Adding 0 is an identity operation except in case of -0: -0 + 0 = +0
+ if (IsIdentityOperation(left(), right(), 0) &&
+ !left()->representation().IsDouble()) { // Left could be -0.
+ return left();
+ }
+ if (IsIdentityOperation(right(), left(), 0) &&
+ !left()->representation().IsDouble()) { // Right could be -0.
+ return right();
+ }
return this;
}
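
The guard added to HAdd::Canonicalize() exists because IEEE-754 addition normalizes the sign of zero: -0.0 + 0.0 evaluates to +0.0, so folding x + 0 to x for doubles could change a result's sign bit. A short demonstration:

    #include <cmath>
    #include <cstdio>

    int main() {
      double minus_zero = -0.0;
      double sum = minus_zero + 0.0;  // IEEE-754: the result is +0.0.
      std::printf("signbit(-0.0) = %d, signbit(-0.0 + 0.0) = %d\n",
                  (int)std::signbit(minus_zero), (int)std::signbit(sum));
      return 0;  // Prints 1 then 0.
    }
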
@@ -1237,6 +1281,16 @@ HValue* HMul::Canonicalize() {
}
+bool HMul::MulMinusOne() {
+ if (left()->EqualsInteger32Constant(-1) ||
+ right()->EqualsInteger32Constant(-1)) {
+ return true;
+ }
+
+ return false;
+}
+
+
HValue* HMod::Canonicalize() {
return this;
}
@@ -1431,11 +1485,9 @@ void HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
HStoreNamedField* store = HStoreNamedField::cast(dominator);
if (!store->has_transition() || store->object() != value()) return;
HConstant* transition = HConstant::cast(store->transition());
- for (int i = 0; i < map_set()->length(); i++) {
- if (transition->UniqueValueIdsMatch(map_unique_ids_.at(i))) {
- DeleteAndReplaceWith(NULL);
- return;
- }
+ if (map_set_.Contains(transition->GetUnique())) {
+ DeleteAndReplaceWith(NULL);
+ return;
}
}
}
@@ -1443,9 +1495,9 @@ void HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
void HCheckMaps::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" [%p", *map_set()->first());
- for (int i = 1; i < map_set()->length(); ++i) {
- stream->Add(",%p", *map_set()->at(i));
+ stream->Add(" [%p", *map_set_.at(0).handle());
+ for (int i = 1; i < map_set_.size(); ++i) {
+ stream->Add(",%p", *map_set_.at(i).handle());
}
stream->Add("]%s", CanOmitMapChecks() ? "(omitted)" : "");
}
@@ -1454,13 +1506,13 @@ void HCheckMaps::PrintDataTo(StringStream* stream) {
void HCheckValue::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
- object()->ShortPrint(stream);
+ object().handle()->ShortPrint(stream);
}
HValue* HCheckValue::Canonicalize() {
return (value()->IsConstant() &&
- HConstant::cast(value())->UniqueValueIdsMatch(object_unique_id_))
+ HConstant::cast(value())->GetUnique() == object_)
? NULL
: this;
}
@@ -1555,6 +1607,11 @@ Range* HConstant::InferRange(Zone* zone) {
}
+int HPhi::position() const {
+ return block()->first()->position();
+}
+
+
Range* HPhi::InferRange(Zone* zone) {
Representation r = representation();
if (r.IsSmiOrInteger32()) {
@@ -1624,10 +1681,13 @@ Range* HMul::InferRange(Zone* zone) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
- if (!res->MulAndCheckOverflow(r, b)) {
- // Clearing the kCanOverflow flag when kAllUsesAreTruncatingToInt32
- // would be wrong, because truncated integer multiplication is too
- // precise and therefore not the same as converting to Double and back.
+ if (!res->MulAndCheckOverflow(r, b) ||
+ (((r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
+ (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) &&
+ MulMinusOne())) {
+ // Truncated int multiplication is too precise and therefore not the
+ // same as converting to Double and back.
+    // Handle truncated integer multiplication by -1 specially.
ClearFlag(kCanOverflow);
}
res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
@@ -1649,7 +1709,10 @@ Range* HDiv::InferRange(Zone* zone) {
result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
(a->CanBeMinusZero() ||
(a->CanBeZero() && b->CanBeNegative())));
- if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+ if (!a->Includes(kMinInt) ||
+ !b->Includes(-1) ||
+ CheckFlag(kAllUsesTruncatingToInt32)) {
+    // It is safe to clear kCanOverflow when kAllUsesTruncatingToInt32 is set.
ClearFlag(HValue::kCanOverflow);
}
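
Illustrative sketch (not part of the upstream patch): both hunks above revolve around the single int32 input that overflows under negation, kMinInt. Whether via * -1 or / -1, the exact result +2147483648 is unrepresentable in int32 and only fits after truncating back to kMinInt — which is why kCanOverflow may be cleared precisely when every use truncates to int32/Smi, and why HMul::MulMinusOne() is special-cased. Numerically:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t wide = static_cast<int64_t>(INT32_MIN) * -1;  // 2147483648
      int32_t truncated = static_cast<int32_t>(wide);  // wraps back to INT32_MIN
                                                       // on two's-complement targets
      double via_double = static_cast<double>(INT32_MIN) * -1.0;  // exact
      std::printf("truncated=%d via_double=%.0f\n", truncated, via_double);
    }
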
@@ -2327,23 +2390,38 @@ void HSimulate::ReplayEnvironment(HEnvironment* env) {
}
+static void ReplayEnvironmentNested(const ZoneList<HValue*>* values,
+ HCapturedObject* other) {
+ for (int i = 0; i < values->length(); ++i) {
+ HValue* value = values->at(i);
+ if (value->IsCapturedObject()) {
+ if (HCapturedObject::cast(value)->capture_id() == other->capture_id()) {
+ values->at(i) = other;
+ } else {
+ ReplayEnvironmentNested(HCapturedObject::cast(value)->values(), other);
+ }
+ }
+ }
+}
+
+
// Replay captured objects by replacing all captured objects with the
// same capture id in the current and all outer environments.
void HCapturedObject::ReplayEnvironment(HEnvironment* env) {
ASSERT(env != NULL);
while (env != NULL) {
- for (int i = 0; i < env->length(); ++i) {
- HValue* value = env->values()->at(i);
- if (value->IsCapturedObject() &&
- HCapturedObject::cast(value)->capture_id() == this->capture_id()) {
- env->SetValueAt(i, this);
- }
- }
+ ReplayEnvironmentNested(env->values(), this);
env = env->outer();
}
}
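
Illustrative sketch (not part of the upstream patch): capture replay is now recursive because a captured object can appear inside another captured object's field list, so replacement must walk nested value lists rather than just the flat environment. A generic model of the traversal over a hypothetical node type:

    #include <vector>

    struct Node {
      int capture_id = -1;        // -1: not a captured object
      std::vector<Node*> values;  // nested field values
    };

    // Replace every captured object carrying other's id, at any nesting depth.
    void ReplayNested(std::vector<Node*>& values, Node* other) {
      for (auto& value : values) {
        if (value->capture_id < 0) continue;
        if (value->capture_id == other->capture_id) {
          value = other;  // rewire this slot to the replayed object
        } else {
          ReplayNested(value->values, other);
        }
      }
    }

    int main() {
      Node inner{7, {}}, outer{3, {&inner}}, replacement{7, {}};
      std::vector<Node*> env = {&outer};
      ReplayNested(env, &replacement);  // rewires outer.values[0] to &replacement
    }
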
+void HCapturedObject::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d ", capture_id());
+ HDematerializedObject::PrintDataTo(stream);
+}
+
+
void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
Zone* zone) {
ASSERT(return_target->IsInlineReturnTarget());
@@ -2365,8 +2443,7 @@ static bool IsInteger32(double value) {
HConstant::HConstant(Handle<Object> handle, Representation r)
: HTemplateInstruction<0>(HType::TypeFromValue(handle)),
- handle_(handle),
- unique_id_(),
+ object_(Unique<Object>::CreateUninitialized(handle)),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
@@ -2375,29 +2452,28 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
is_not_in_new_space_(true),
is_cell_(false),
boolean_value_(handle->BooleanValue()) {
- if (handle_->IsHeapObject()) {
+ if (handle->IsHeapObject()) {
Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
is_not_in_new_space_ = !heap->InNewSpace(*handle);
}
- if (handle_->IsNumber()) {
- double n = handle_->Number();
+ if (handle->IsNumber()) {
+ double n = handle->Number();
has_int32_value_ = IsInteger32(n);
int32_value_ = DoubleToInt32(n);
has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
double_value_ = n;
has_double_value_ = true;
} else {
- is_internalized_string_ = handle_->IsInternalizedString();
+ is_internalized_string_ = handle->IsInternalizedString();
}
- is_cell_ = !handle_.is_null() &&
- (handle_->IsCell() || handle_->IsPropertyCell());
+ is_cell_ = !handle.is_null() &&
+ (handle->IsCell() || handle->IsPropertyCell());
Initialize(r);
}
-HConstant::HConstant(Handle<Object> handle,
- UniqueValueId unique_id,
+HConstant::HConstant(Unique<Object> unique,
Representation r,
HType type,
bool is_internalize_string,
@@ -2405,8 +2481,7 @@ HConstant::HConstant(Handle<Object> handle,
bool is_cell,
bool boolean_value)
: HTemplateInstruction<0>(type),
- handle_(handle),
- unique_id_(unique_id),
+ object_(unique),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
@@ -2415,36 +2490,17 @@ HConstant::HConstant(Handle<Object> handle,
is_not_in_new_space_(is_not_in_new_space),
is_cell_(is_cell),
boolean_value_(boolean_value) {
- ASSERT(!handle.is_null());
+ ASSERT(!unique.handle().is_null());
ASSERT(!type.IsTaggedNumber());
Initialize(r);
}
-HConstant::HConstant(Handle<Map> handle,
- UniqueValueId unique_id)
- : HTemplateInstruction<0>(HType::Tagged()),
- handle_(handle),
- unique_id_(unique_id),
- has_smi_value_(false),
- has_int32_value_(false),
- has_double_value_(false),
- has_external_reference_value_(false),
- is_internalized_string_(false),
- is_not_in_new_space_(true),
- is_cell_(false),
- boolean_value_(false) {
- ASSERT(!handle.is_null());
- Initialize(Representation::Tagged());
-}
-
-
HConstant::HConstant(int32_t integer_value,
Representation r,
bool is_not_in_new_space,
- Handle<Object> optional_handle)
- : handle_(optional_handle),
- unique_id_(),
+ Unique<Object> object)
+ : object_(object),
has_smi_value_(Smi::IsValid(integer_value)),
has_int32_value_(true),
has_double_value_(true),
@@ -2463,9 +2519,8 @@ HConstant::HConstant(int32_t integer_value,
HConstant::HConstant(double double_value,
Representation r,
bool is_not_in_new_space,
- Handle<Object> optional_handle)
- : handle_(optional_handle),
- unique_id_(),
+ Unique<Object> object)
+ : object_(object),
has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
has_external_reference_value_(false),
@@ -2483,6 +2538,7 @@ HConstant::HConstant(double double_value,
HConstant::HConstant(ExternalReference reference)
: HTemplateInstruction<0>(HType::None()),
+ object_(Unique<Object>(Handle<Object>::null())),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
@@ -2496,14 +2552,6 @@ HConstant::HConstant(ExternalReference reference)
}
-static void PrepareConstant(Handle<Object> object) {
- if (!object->IsJSObject()) return;
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- if (!js_object->map()->is_deprecated()) return;
- JSObject::TryMigrateInstance(js_object);
-}
-
-
void HConstant::Initialize(Representation r) {
if (r.IsNone()) {
if (has_smi_value_ && SmiValuesAre31Bits()) {
@@ -2515,7 +2563,14 @@ void HConstant::Initialize(Representation r) {
} else if (has_external_reference_value_) {
r = Representation::External();
} else {
- PrepareConstant(handle_);
+ Handle<Object> object = object_.handle();
+ if (object->IsJSObject()) {
+ // Try to eagerly migrate JSObjects that have deprecated maps.
+ Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+ if (js_object->map()->is_deprecated()) {
+ JSObject::TryMigrateInstance(js_object);
+ }
+ }
r = Representation::Tagged();
}
}
@@ -2526,9 +2581,12 @@ void HConstant::Initialize(Representation r) {
bool HConstant::EmitAtUses() {
ASSERT(IsLinked());
- if (block()->graph()->has_osr()) {
- return block()->graph()->IsStandardConstant(this);
+ if (block()->graph()->has_osr() &&
+ block()->graph()->IsStandardConstant(this)) {
+ // TODO(titzer): this seems like a hack that should be fixed by custom OSR.
+ return true;
}
+ if (UseCount() == 0) return true;
if (IsCell()) return false;
if (representation().IsDouble()) return false;
return true;
@@ -2541,17 +2599,16 @@ HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (r.IsDouble() && !has_double_value_) return NULL;
if (r.IsExternal() && !has_external_reference_value_) return NULL;
if (has_int32_value_) {
- return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, handle_);
+ return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, object_);
}
if (has_double_value_) {
- return new(zone) HConstant(double_value_, r, is_not_in_new_space_, handle_);
+ return new(zone) HConstant(double_value_, r, is_not_in_new_space_, object_);
}
if (has_external_reference_value_) {
return new(zone) HConstant(external_reference_value_);
}
- ASSERT(!handle_.is_null());
- return new(zone) HConstant(handle_,
- unique_id_,
+ ASSERT(!object_.handle().is_null());
+ return new(zone) HConstant(object_,
r,
type_,
is_internalized_string_,
@@ -2567,16 +2624,12 @@ Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
res = new(zone) HConstant(int32_value_,
Representation::Integer32(),
is_not_in_new_space_,
- handle_);
+ object_);
} else if (has_double_value_) {
res = new(zone) HConstant(DoubleToInt32(double_value_),
Representation::Integer32(),
is_not_in_new_space_,
- handle_);
- } else {
- ASSERT(!HasNumberValue());
- Maybe<HConstant*> number = CopyToTruncatedNumber(zone);
- if (number.has_value) return number.value->CopyToTruncatedInt32(zone);
+ object_);
}
return Maybe<HConstant*>(res != NULL, res);
}
@@ -2624,6 +2677,12 @@ void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
+
+ if (representation().IsSmi() && HasNonSmiUse()) {
+ UpdateRepresentation(
+ Representation::Integer32(), h_infer, "use requirements");
+ }
+
if (observed_output_representation_.IsNone()) {
new_rep = RepresentationFromUses();
UpdateRepresentation(new_rep, h_infer, "uses");
@@ -2631,11 +2690,6 @@ void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
new_rep = RepresentationFromOutput();
UpdateRepresentation(new_rep, h_infer, "output");
}
-
- if (representation().IsSmi() && HasNonSmiUse()) {
- UpdateRepresentation(
- Representation::Integer32(), h_infer, "use requirements");
- }
}
@@ -2662,7 +2716,7 @@ bool HBinaryOperation::IgnoreObservedOutputRepresentation(
return ((current_rep.IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) ||
(current_rep.IsSmi() && CheckUsesForFlag(kTruncatingToSmi))) &&
// Mul in Integer32 mode would be too precise.
- !this->IsMul();
+ (!this->IsMul() || HMul::cast(this)->MulMinusOne());
}
@@ -2802,6 +2856,9 @@ Range* HShl::InferRange(Zone* zone) {
Range* HLoadNamedField::InferRange(Zone* zone) {
+ if (access().representation().IsByte()) {
+ return new(zone) Range(0, 255);
+ }
if (access().IsStringLength()) {
return new(zone) Range(0, String::kMaxLength);
}
@@ -2859,15 +2916,23 @@ void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
}
-void HCompareHoleAndBranch::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- HControlInstruction::PrintDataTo(stream);
+bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (left()->IsConstant() && right()->IsConstant()) {
+ bool comparison_result =
+ HConstant::cast(left())->Equals(HConstant::cast(right()));
+ *block = comparison_result
+ ? FirstSuccessor()
+ : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
}
void HCompareHoleAndBranch::InferRepresentation(
HInferRepresentationPhase* h_infer) {
- ChangeRepresentation(object()->representation());
+ ChangeRepresentation(value()->representation());
}
@@ -2937,22 +3002,17 @@ HCheckMaps* HCheckMaps::New(Zone* zone,
if (map->CanOmitMapChecks() &&
value->IsConstant() &&
HConstant::cast(value)->HasMap(map)) {
- check_map->omit(info);
+ // TODO(titzer): collect dependent map checks into a list.
+ check_map->omit_ = true;
+ if (map->CanTransition()) {
+ map->AddDependentCompilationInfo(
+ DependentCode::kPrototypeCheckGroup, info);
+ }
}
return check_map;
}
-void HCheckMaps::FinalizeUniqueValueId() {
- if (!map_unique_ids_.is_empty()) return;
- Zone* zone = block()->zone();
- map_unique_ids_.Initialize(map_set_.length(), zone);
- for (int i = 0; i < map_set_.length(); i++) {
- map_unique_ids_.Add(UniqueValueId(map_set_.at(i)), zone);
- }
-}
-
-
void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
@@ -3148,19 +3208,19 @@ void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- ElementsKind from_kind = original_map()->elements_kind();
- ElementsKind to_kind = transitioned_map()->elements_kind();
+ ElementsKind from_kind = original_map().handle()->elements_kind();
+ ElementsKind to_kind = transitioned_map().handle()->elements_kind();
stream->Add(" %p [%s] -> %p [%s]",
- *original_map(),
+ *original_map().handle(),
ElementsAccessor::ForKind(from_kind)->name(),
- *transitioned_map(),
+ *transitioned_map().handle(),
ElementsAccessor::ForKind(to_kind)->name());
if (IsSimpleMapChangeTransition(from_kind, to_kind)) stream->Add(" (simple)");
}
void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p]", *cell());
+ stream->Add("[%p]", *cell().handle());
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
}
@@ -3188,7 +3248,7 @@ void HInnerAllocatedObject::PrintDataTo(StringStream* stream) {
void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p] = ", *cell());
+ stream->Add("[%p] = ", *cell().handle());
value()->PrintNameTo(stream);
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
@@ -3454,8 +3514,8 @@ void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
HConstant* filler_map = HConstant::New(
zone,
context(),
- isolate()->factory()->free_space_map(),
- UniqueValueId::free_space_map(isolate()->heap()));
+ isolate()->factory()->free_space_map());
+ filler_map->FinalizeUniqueness(); // TODO(titzer): should be init'd a'ready
filler_map->InsertAfter(free_space_instr);
HInstruction* store_map = HStoreNamedField::New(zone, context(),
free_space_instr, HObjectAccess::ForMap(), filler_map);
@@ -4004,7 +4064,7 @@ Representation HValue::RepresentationFromUseRequirements() {
Representation rep = Representation::None();
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
// Ignore the use requirement from never run code
- if (it.value()->block()->IsDeoptimizing()) continue;
+ if (it.value()->block()->IsUnreachable()) continue;
// We check for observed_input_representation elsewhere.
Representation use_rep =
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 7d33141a4f..80773bf147 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -36,6 +36,7 @@
#include "deoptimizer.h"
#include "small-pointer-list.h"
#include "string-stream.h"
+#include "unique.h"
#include "v8conversions.h"
#include "v8utils.h"
#include "zone.h"
@@ -63,6 +64,7 @@ class LChunkBuilder;
#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AbnormalExit) \
V(AccessArgumentsAt) \
V(Add) \
V(Allocate) \
@@ -124,11 +126,9 @@ class LChunkBuilder;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
- V(IsNumberAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -143,6 +143,7 @@ class LChunkBuilder;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
+ V(LoadRoot) \
V(MapEnumLength) \
V(MathFloorOfDiv) \
V(MathMinMax) \
@@ -305,64 +306,6 @@ class Range V8_FINAL : public ZoneObject {
};
-class UniqueValueId V8_FINAL {
- public:
- UniqueValueId() : raw_address_(NULL) { }
-
- explicit UniqueValueId(Handle<Object> handle) {
- ASSERT(!AllowHeapAllocation::IsAllowed());
- static const Address kEmptyHandleSentinel = reinterpret_cast<Address>(1);
- if (handle.is_null()) {
- raw_address_ = kEmptyHandleSentinel;
- } else {
- raw_address_ = reinterpret_cast<Address>(*handle);
- ASSERT_NE(kEmptyHandleSentinel, raw_address_);
- }
- ASSERT(IsInitialized());
- }
-
- bool IsInitialized() const { return raw_address_ != NULL; }
-
- bool operator==(const UniqueValueId& other) const {
- ASSERT(IsInitialized() && other.IsInitialized());
- return raw_address_ == other.raw_address_;
- }
-
- bool operator!=(const UniqueValueId& other) const {
- ASSERT(IsInitialized() && other.IsInitialized());
- return raw_address_ != other.raw_address_;
- }
-
- intptr_t Hashcode() const {
- ASSERT(IsInitialized());
- return reinterpret_cast<intptr_t>(raw_address_);
- }
-
-#define IMMOVABLE_UNIQUE_VALUE_ID(name) \
- static UniqueValueId name(Heap* heap) { return UniqueValueId(heap->name()); }
-
- IMMOVABLE_UNIQUE_VALUE_ID(free_space_map)
- IMMOVABLE_UNIQUE_VALUE_ID(minus_zero_value)
- IMMOVABLE_UNIQUE_VALUE_ID(nan_value)
- IMMOVABLE_UNIQUE_VALUE_ID(undefined_value)
- IMMOVABLE_UNIQUE_VALUE_ID(null_value)
- IMMOVABLE_UNIQUE_VALUE_ID(true_value)
- IMMOVABLE_UNIQUE_VALUE_ID(false_value)
- IMMOVABLE_UNIQUE_VALUE_ID(the_hole_value)
- IMMOVABLE_UNIQUE_VALUE_ID(empty_string)
-
-#undef IMMOVABLE_UNIQUE_VALUE_ID
-
- private:
- Address raw_address_;
-
- explicit UniqueValueId(Object* object) {
- raw_address_ = reinterpret_cast<Address>(object);
- ASSERT(IsInitialized());
- }
-};
-
-
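
Illustrative sketch (not part of the upstream patch): the deleted UniqueValueId is superseded by the templated Unique<T> (and UniqueSet<T>) from the newly included unique.h. The idea carries over unchanged — capture the object's address once, while GC cannot move it, then compare and hash that raw address. A simplified model of the concept, not the actual V8 class:

    #include <cstdint>

    template <typename T>
    class UniqueModel {
     public:
      // Capture the identity while the object cannot move (the deleted code
      // asserted !AllowHeapAllocation::IsAllowed() at this point).
      explicit UniqueModel(T* object)
          : raw_address_(reinterpret_cast<intptr_t>(object)), object_(object) {}

      bool operator==(const UniqueModel& other) const {
        return raw_address_ == other.raw_address_;
      }
      intptr_t Hashcode() const { return raw_address_; }
      T* handle() const { return object_; }  // stand-in for Handle<T>

     private:
      intptr_t raw_address_;  // identity only, never dereferenced
      T* object_;
    };

    int main() {
      int heap_object = 0;  // stands in for a HeapObject
      UniqueModel<int> a(&heap_object), b(&heap_object);
      return a == b ? 0 : 1;  // same captured address => equal
    }
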
class HType V8_FINAL {
public:
static HType None() { return HType(kNone); }
@@ -695,6 +638,8 @@ class HValue : public ZoneObject {
flags_(0) {}
virtual ~HValue() {}
+ virtual int position() const { return RelocInfo::kNoPosition; }
+
HBasicBlock* block() const { return block_; }
void SetBlock(HBasicBlock* block);
int LoopWeight() const;
@@ -777,16 +722,24 @@ class HValue : public ZoneObject {
return index == kNoRedefinedOperand ? NULL : OperandAt(index);
}
+ bool CanReplaceWithDummyUses();
+
+ virtual int argument_delta() const { return 0; }
+
// A purely informative definition is an idef that will not emit code and
// should therefore be removed from the graph in the RestoreActualValues
// phase (so that live ranges will be shorter).
virtual bool IsPurelyInformativeDefinition() { return false; }
- // This method must always return the original HValue SSA definition
- // (regardless of any iDef of this value).
+ // This method must always return the original HValue SSA definition,
+ // regardless of any chain of iDefs of this value.
HValue* ActualValue() {
- int index = RedefinedOperandIndex();
- return index == kNoRedefinedOperand ? this : OperandAt(index);
+ HValue* value = this;
+ int index;
+ while ((index = value->RedefinedOperandIndex()) != kNoRedefinedOperand) {
+ value = value->OperandAt(index);
+ }
+ return value;
}
bool IsInteger32Constant();
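
Illustrative sketch (not part of the upstream patch): ActualValue() grows from a single hop into a loop because informative definitions can now chain (an idef of an idef); the pattern is plain pointer-chasing to a fixed point:

    struct Def {
      Def* redefined = nullptr;  // non-null: this is an idef of another value
    };

    // Follow the chain of redefinitions back to the original SSA definition.
    Def* ActualValue(Def* value) {
      while (value->redefined != nullptr) value = value->redefined;
      return value;
    }

    int main() {
      Def original, idef1{&original}, idef2{&idef1};
      return ActualValue(&idef2) == &original ? 0 : 1;
    }
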
@@ -815,6 +768,9 @@ class HValue : public ZoneObject {
void SetFlag(Flag f) { flags_ |= (1 << f); }
void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
+ void CopyFlag(Flag f, HValue* other) {
+ if (other->CheckFlag(f)) SetFlag(f);
+ }
// Returns true if the flag specified is set for all uses, false otherwise.
bool CheckUsesForFlag(Flag f) const;
@@ -898,7 +854,7 @@ class HValue : public ZoneObject {
virtual intptr_t Hashcode();
// Compute unique ids upfront that is safe wrt GC and concurrent compilation.
- virtual void FinalizeUniqueValueId() { }
+ virtual void FinalizeUniqueness() { }
// Printing support.
virtual void PrintTo(StringStream* stream) = 0;
@@ -1104,6 +1060,47 @@ class HValue : public ZoneObject {
return new(zone) I(p1, p2, p3, p4, p5); \
}
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \
+ static I* New(Zone* zone, HValue* context) { \
+ return new(zone) I(context); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(I, P1) \
+ static I* New(Zone* zone, HValue* context, P1 p1) { \
+ return new(zone) I(context, p1); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(I, P1, P2) \
+ static I* New(Zone* zone, HValue* context, P1 p1, P2 p2) { \
+ return new(zone) I(context, p1, p2); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(I, P1, P2, P3) \
+ static I* New(Zone* zone, HValue* context, P1 p1, P2 p2, P3 p3) { \
+ return new(zone) I(context, p1, p2, p3); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(I, P1, P2, P3, P4) \
+ static I* New(Zone* zone, \
+ HValue* context, \
+ P1 p1, \
+ P2 p2, \
+ P3 p3, \
+ P4 p4) { \
+ return new(zone) I(context, p1, p2, p3, p4); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(I, P1, P2, P3, P4, P5) \
+ static I* New(Zone* zone, \
+ HValue* context, \
+ P1 p1, \
+ P2 p2, \
+ P3 p3, \
+ P4 p4, \
+ P5 p5) { \
+ return new(zone) I(context, p1, p2, p3, p4, p5); \
+ }
+
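
As an aside (not part of the upstream patch): the _WITH_CONTEXT_ variants differ from the plain factories only in threading the context operand into the (typically private) constructor, keeping every call site in the uniform shape I::New(zone, context, ...). For example, DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HThrow, HValue*), used further below, expands mechanically to:

    static HThrow* New(Zone* zone, HValue* context, HValue* p1) {
      return new(zone) HThrow(context, p1);
    }
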
class HInstruction : public HValue {
public:
@@ -1119,7 +1116,7 @@ class HInstruction : public HValue {
void InsertAfter(HInstruction* previous);
// The position is a write-once variable.
- int position() const { return position_; }
+ virtual int position() const V8_OVERRIDE { return position_; }
bool has_position() const { return position_ != RelocInfo::kNoPosition; }
void set_position(int position) {
ASSERT(!has_position());
@@ -1194,6 +1191,11 @@ class HControlInstruction : public HInstruction {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) {
+ *block = NULL;
+ return false;
+ }
+
HBasicBlock* FirstSuccessor() {
return SuccessorCount() > 0 ? SuccessorAt(0) : NULL;
}
@@ -1201,6 +1203,12 @@ class HControlInstruction : public HInstruction {
return SuccessorCount() > 1 ? SuccessorAt(1) : NULL;
}
+ void Not() {
+ HBasicBlock* swap = SuccessorAt(0);
+ SetSuccessorAt(0, SuccessorAt(1));
+ SetSuccessorAt(1, swap);
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
};
@@ -1277,53 +1285,74 @@ class HDummyUse V8_FINAL : public HTemplateInstruction<1> {
};
-class HDeoptimize V8_FINAL : public HTemplateInstruction<0> {
+// Inserts an int3/stop break instruction for debugging purposes.
+class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HDeoptimize, const char*,
- Deoptimizer::BailoutType);
+ DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- const char* reason() const { return reason_; }
- Deoptimizer::BailoutType type() { return type_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
-
- private:
- explicit HDeoptimize(const char* reason, Deoptimizer::BailoutType type)
- : reason_(reason), type_(type) {}
-
- const char* reason_;
- Deoptimizer::BailoutType type_;
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak)
};
-// Inserts an int3/stop break instruction for debugging purposes.
-class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
+class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> {
public:
+ explicit HGoto(HBasicBlock* target) {
+ SetSuccessorAt(0, target);
+ }
+
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+ *block = FirstSuccessor();
+ return true;
+ }
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak)
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(Goto)
};
-class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> {
+class HDeoptimize V8_FINAL : public HTemplateControlInstruction<1, 0> {
public:
- explicit HGoto(HBasicBlock* target) {
- SetSuccessorAt(0, target);
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ const char* reason,
+ Deoptimizer::BailoutType type,
+ HBasicBlock* unreachable_continuation) {
+ return new(zone) HDeoptimize(reason, type, unreachable_continuation);
+ }
+
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+ *block = NULL;
+ return true;
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ const char* reason() const { return reason_; }
+ Deoptimizer::BailoutType type() { return type_; }
- DECLARE_CONCRETE_INSTRUCTION(Goto)
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
+
+ private:
+ explicit HDeoptimize(const char* reason,
+ Deoptimizer::BailoutType type,
+ HBasicBlock* unreachable_continuation)
+ : reason_(reason), type_(type) {
+ SetSuccessorAt(0, unreachable_continuation);
+ }
+
+ const char* reason_;
+ Deoptimizer::BailoutType type_;
};
@@ -1345,20 +1374,20 @@ class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
class HBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HBranch(HValue* value,
- ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target),
- expected_input_types_(expected_input_types) {
- SetFlag(kAllowUndefinedAsNaN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*,
+ ToBooleanStub::Types);
+ DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*,
+ ToBooleanStub::Types,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
virtual Representation observed_input_representation(int index) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
}
@@ -1366,24 +1395,28 @@ class HBranch V8_FINAL : public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(Branch)
private:
+ HBranch(HValue* value,
+ ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target),
+ expected_input_types_(expected_input_types) {
+ SetFlag(kAllowUndefinedAsNaN);
+ }
+
ToBooleanStub::Types expected_input_types_;
};
class HCompareMap V8_FINAL : public HUnaryControlInstruction {
public:
- HCompareMap(HValue* value,
- Handle<Map> map,
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target),
- map_(map) {
- ASSERT(!map.is_null());
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle<Map>);
+ DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
+ HBasicBlock*, HBasicBlock*);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> map() const { return map_; }
+ Unique<Map> map() const { return map_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -1395,7 +1428,16 @@ class HCompareMap V8_FINAL : public HUnaryControlInstruction {
virtual int RedefinedOperandIndex() { return 0; }
private:
- Handle<Map> map_;
+ HCompareMap(HValue* value,
+ Handle<Map> map,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target),
+ map_(Unique<Map>(map)) {
+ ASSERT(!map.is_null());
+ }
+
+ Unique<Map> map_;
};
@@ -1426,18 +1468,8 @@ class HContext V8_FINAL : public HTemplateInstruction<0> {
class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* value,
- HValue* parameter_count) {
- return new(zone) HReturn(value, context, parameter_count);
- }
-
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* value) {
- return new(zone) HReturn(value, context, 0);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HReturn, HValue*, HValue*);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HReturn, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -1452,7 +1484,7 @@ class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
DECLARE_CONCRETE_INSTRUCTION(Return)
private:
- HReturn(HValue* value, HValue* context, HValue* parameter_count) {
+ HReturn(HValue* context, HValue* value, HValue* parameter_count = 0) {
SetOperandAt(0, value);
SetOperandAt(1, context);
SetOperandAt(2, parameter_count);
@@ -1460,6 +1492,20 @@ class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
};
+class HAbnormalExit V8_FINAL : public HTemplateControlInstruction<0, 0> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P0(HAbnormalExit);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AbnormalExit)
+ private:
+ HAbnormalExit() {}
+};
+
+
class HUnaryOperation : public HTemplateInstruction<1> {
public:
HUnaryOperation(HValue* value, HType type = HType::Tagged())
@@ -1478,11 +1524,7 @@ class HUnaryOperation : public HTemplateInstruction<1> {
class HThrow V8_FINAL : public HTemplateInstruction<2> {
public:
- static HThrow* New(Zone* zone,
- HValue* context,
- HValue* value) {
- return new(zone) HThrow(context, value);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HThrow, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -1738,8 +1780,7 @@ class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> {
public:
enum Kind { BIND, LOOKUP };
- HEnvironmentMarker(Kind kind, int index)
- : kind_(kind), index_(index), next_simulate_(NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HEnvironmentMarker, Kind, int);
Kind kind() { return kind_; }
int index() { return index_; }
@@ -1766,6 +1807,9 @@ class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> {
DECLARE_CONCRETE_INSTRUCTION(EnvironmentMarker);
private:
+ HEnvironmentMarker(Kind kind, int index)
+ : kind_(kind), index_(index), next_simulate_(NULL) { }
+
Kind kind_;
int index_;
HSimulate* next_simulate_;
@@ -1783,7 +1827,7 @@ class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
kBackwardsBranch
};
- DECLARE_INSTRUCTION_FACTORY_P2(HStackCheck, HValue*, Type);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HStackCheck, Type);
HValue* context() { return OperandAt(0); }
@@ -1898,13 +1942,24 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
class HLeaveInlined V8_FINAL : public HTemplateInstruction<0> {
public:
- HLeaveInlined() { }
+ HLeaveInlined(HEnterInlined* entry,
+ int drop_count)
+ : entry_(entry),
+ drop_count_(drop_count) { }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
+ virtual int argument_delta() const V8_OVERRIDE {
+ return entry_->arguments_pushed() ? -drop_count_ : 0;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
+
+ private:
+ HEnterInlined* entry_;
+ int drop_count_;
};
@@ -1916,6 +1971,7 @@ class HPushArgument V8_FINAL : public HUnaryOperation {
return Representation::Tagged();
}
+ virtual int argument_delta() const V8_OVERRIDE { return 1; }
HValue* argument() { return OperandAt(0); }
DECLARE_CONCRETE_INSTRUCTION(PushArgument)
@@ -1929,10 +1985,7 @@ class HPushArgument V8_FINAL : public HUnaryOperation {
class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
public:
- HThisFunction() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
@@ -1944,6 +1997,11 @@ class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
+ HThisFunction() {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -1973,22 +2031,9 @@ class HOuterContext V8_FINAL : public HUnaryOperation {
class HDeclareGlobals V8_FINAL : public HUnaryOperation {
public:
- HDeclareGlobals(HValue* context,
- Handle<FixedArray> pairs,
- int flags)
- : HUnaryOperation(context),
- pairs_(pairs),
- flags_(flags) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- static HDeclareGlobals* New(Zone* zone,
- HValue* context,
- Handle<FixedArray> pairs,
- int flags) {
- return new(zone) HDeclareGlobals(context, pairs, flags);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HDeclareGlobals,
+ Handle<FixedArray>,
+ int);
HValue* context() { return OperandAt(0); }
Handle<FixedArray> pairs() const { return pairs_; }
@@ -2001,6 +2046,16 @@ class HDeclareGlobals V8_FINAL : public HUnaryOperation {
}
private:
+ HDeclareGlobals(HValue* context,
+ Handle<FixedArray> pairs,
+ int flags)
+ : HUnaryOperation(context),
+ pairs_(pairs),
+ flags_(flags) {
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Handle<FixedArray> pairs_;
int flags_;
};
@@ -2008,14 +2063,7 @@ class HDeclareGlobals V8_FINAL : public HUnaryOperation {
class HGlobalObject V8_FINAL : public HUnaryOperation {
public:
- explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- static HGlobalObject* New(Zone* zone, HValue* context) {
- return new(zone) HGlobalObject(context);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(HGlobalObject);
DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
@@ -2027,6 +2075,11 @@ class HGlobalObject V8_FINAL : public HUnaryOperation {
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
+ explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -2068,7 +2121,13 @@ class HCall : public HTemplateInstruction<V> {
return HType::Tagged();
}
- virtual int argument_count() const { return argument_count_; }
+ virtual int argument_count() const {
+ return argument_count_;
+ }
+
+ virtual int argument_delta() const V8_OVERRIDE {
+ return -argument_count();
+ }
virtual bool IsCall() V8_FINAL V8_OVERRIDE { return true; }
@@ -2117,16 +2176,7 @@ class HBinaryCall : public HCall<2> {
class HInvokeFunction V8_FINAL : public HBinaryCall {
public:
- HInvokeFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count) {
- }
-
- static HInvokeFunction* New(Zone* zone,
- HValue* context,
- HValue* function,
- int argument_count) {
- return new(zone) HInvokeFunction(context, function, argument_count);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
HInvokeFunction(HValue* context,
HValue* function,
@@ -2155,6 +2205,10 @@ class HInvokeFunction V8_FINAL : public HBinaryCall {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
private:
+ HInvokeFunction(HValue* context, HValue* function, int argument_count)
+ : HBinaryCall(context, function, argument_count) {
+ }
+
Handle<JSFunction> known_function_;
int formal_parameter_count_;
};
@@ -2162,10 +2216,9 @@ class HInvokeFunction V8_FINAL : public HBinaryCall {
class HCallConstantFunction V8_FINAL : public HCall<0> {
public:
- HCallConstantFunction(Handle<JSFunction> function, int argument_count)
- : HCall<0>(argument_count),
- function_(function),
- formal_parameter_count_(function->shared()->formal_parameter_count()) {}
+ DECLARE_INSTRUCTION_FACTORY_P2(HCallConstantFunction,
+ Handle<JSFunction>,
+ int);
Handle<JSFunction> function() const { return function_; }
int formal_parameter_count() const { return formal_parameter_count_; }
@@ -2184,6 +2237,11 @@ class HCallConstantFunction V8_FINAL : public HCall<0> {
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction)
private:
+ HCallConstantFunction(Handle<JSFunction> function, int argument_count)
+ : HCall<0>(argument_count),
+ function_(function),
+ formal_parameter_count_(function->shared()->formal_parameter_count()) {}
+
Handle<JSFunction> function_;
int formal_parameter_count_;
};
@@ -2191,22 +2249,23 @@ class HCallConstantFunction V8_FINAL : public HCall<0> {
class HCallKeyed V8_FINAL : public HBinaryCall {
public:
- HCallKeyed(HValue* context, HValue* key, int argument_count)
- : HBinaryCall(context, key, argument_count) {
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallKeyed, HValue*, int);
HValue* context() { return first(); }
HValue* key() { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed)
+
+ private:
+ HCallKeyed(HValue* context, HValue* key, int argument_count)
+ : HBinaryCall(context, key, argument_count) {
+ }
};
class HCallNamed V8_FINAL : public HUnaryCall {
public:
- HCallNamed(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNamed, Handle<String>, int);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -2216,42 +2275,33 @@ class HCallNamed V8_FINAL : public HUnaryCall {
DECLARE_CONCRETE_INSTRUCTION(CallNamed)
private:
+ HCallNamed(HValue* context, Handle<String> name, int argument_count)
+ : HUnaryCall(context, argument_count), name_(name) {
+ }
+
Handle<String> name_;
};
class HCallFunction V8_FINAL : public HBinaryCall {
public:
- HCallFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count) {
- }
-
- static HCallFunction* New(Zone* zone,
- HValue* context,
- HValue* function,
- int argument_count) {
- return new(zone) HCallFunction(context, function, argument_count);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
HValue* context() { return first(); }
HValue* function() { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallFunction)
+
+ private:
+ HCallFunction(HValue* context, HValue* function, int argument_count)
+ : HBinaryCall(context, function, argument_count) {
+ }
};
class HCallGlobal V8_FINAL : public HUnaryCall {
public:
- HCallGlobal(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
- }
-
- static HCallGlobal* New(Zone* zone,
- HValue* context,
- Handle<String> name,
- int argument_count) {
- return new(zone) HCallGlobal(context, name, argument_count);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallGlobal, Handle<String>, int);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -2261,16 +2311,17 @@ class HCallGlobal V8_FINAL : public HUnaryCall {
DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
private:
+ HCallGlobal(HValue* context, Handle<String> name, int argument_count)
+ : HUnaryCall(context, argument_count), name_(name) {
+ }
+
Handle<String> name_;
};
class HCallKnownGlobal V8_FINAL : public HCall<0> {
public:
- HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
- : HCall<0>(argument_count),
- target_(target),
- formal_parameter_count_(target->shared()->formal_parameter_count()) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HCallKnownGlobal, Handle<JSFunction>, int);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -2284,6 +2335,11 @@ class HCallKnownGlobal V8_FINAL : public HCall<0> {
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal)
private:
+ HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
+ : HCall<0>(argument_count),
+ target_(target),
+ formal_parameter_count_(target->shared()->formal_parameter_count()) { }
+
Handle<JSFunction> target_;
int formal_parameter_count_;
};
@@ -2291,23 +2347,26 @@ class HCallKnownGlobal V8_FINAL : public HCall<0> {
class HCallNew V8_FINAL : public HBinaryCall {
public:
- HCallNew(HValue* context, HValue* constructor, int argument_count)
- : HBinaryCall(context, constructor, argument_count) {}
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNew, HValue*, int);
HValue* context() { return first(); }
HValue* constructor() { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallNew)
+
+ private:
+ HCallNew(HValue* context, HValue* constructor, int argument_count)
+ : HBinaryCall(context, constructor, argument_count) {}
};
class HCallNewArray V8_FINAL : public HBinaryCall {
public:
- HCallNewArray(HValue* context, HValue* constructor, int argument_count,
- Handle<Cell> type_cell, ElementsKind elements_kind)
- : HBinaryCall(context, constructor, argument_count),
- elements_kind_(elements_kind),
- type_cell_(type_cell) {}
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HCallNewArray,
+ HValue*,
+ int,
+ Handle<Cell>,
+ ElementsKind);
HValue* context() { return first(); }
HValue* constructor() { return second(); }
@@ -2323,6 +2382,12 @@ class HCallNewArray V8_FINAL : public HBinaryCall {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
private:
+ HCallNewArray(HValue* context, HValue* constructor, int argument_count,
+ Handle<Cell> type_cell, ElementsKind elements_kind)
+ : HBinaryCall(context, constructor, argument_count),
+ elements_kind_(elements_kind),
+ type_cell_(type_cell) {}
+
ElementsKind elements_kind_;
Handle<Cell> type_cell_;
};
@@ -2330,19 +2395,20 @@ class HCallNewArray V8_FINAL : public HBinaryCall {
class HCallRuntime V8_FINAL : public HCall<1> {
public:
- static HCallRuntime* New(Zone* zone,
- HValue* context,
- Handle<String> name,
- const Runtime::Function* c_function,
- int argument_count) {
- return new(zone) HCallRuntime(context, name, c_function, argument_count);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallRuntime,
+ Handle<String>,
+ const Runtime::Function*,
+ int);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* context() { return OperandAt(0); }
const Runtime::Function* function() const { return c_function_; }
Handle<String> name() const { return name_; }
+ SaveFPRegsMode save_doubles() const { return save_doubles_; }
+ void set_save_doubles(SaveFPRegsMode save_doubles) {
+ save_doubles_ = save_doubles;
+ }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -2355,12 +2421,14 @@ class HCallRuntime V8_FINAL : public HCall<1> {
Handle<String> name,
const Runtime::Function* c_function,
int argument_count)
- : HCall<1>(argument_count), c_function_(c_function), name_(name) {
+ : HCall<1>(argument_count), c_function_(c_function), name_(name),
+ save_doubles_(kDontSaveFPRegs) {
SetOperandAt(0, context);
}
const Runtime::Function* c_function_;
Handle<String> name_;
+ SaveFPRegsMode save_doubles_;
};
@@ -2509,6 +2577,40 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
};
+class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P1(HLoadRoot, Heap::RootListIndex);
+ DECLARE_INSTRUCTION_FACTORY_P2(HLoadRoot, Heap::RootListIndex, HType);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::None();
+ }
+
+ Heap::RootListIndex index() const { return index_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HLoadRoot* b = HLoadRoot::cast(other);
+ return index_ == b->index_;
+ }
+
+ private:
+ HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged())
+ : HTemplateInstruction<0>(type), index_(index) {
+ SetFlag(kUseGVN);
+ // TODO(bmeurer): We'll need kDependsOnRoots once we add the
+ // corresponding HStoreRoot instruction.
+ SetGVNFlag(kDependsOnCalls);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ const Heap::RootListIndex index_;
+};
+
+
class HLoadExternalArrayPointer V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HLoadExternalArrayPointer, HValue*);
@@ -2553,7 +2655,6 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
for (int i = 0; i < maps->length(); i++) {
check_map->Add(maps->at(i), zone);
}
- check_map->map_set_.Sort();
return check_map;
}
@@ -2568,38 +2669,26 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
- SmallMapList* map_set() { return &map_set_; }
- ZoneList<UniqueValueId>* map_unique_ids() { return &map_unique_ids_; }
- bool has_migration_target() {
+ Unique<Map> first_map() const { return map_set_.at(0); }
+ UniqueSet<Map> map_set() const { return map_set_; }
+
+ bool has_migration_target() const {
return has_migration_target_;
}
- virtual void FinalizeUniqueValueId() V8_OVERRIDE;
-
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
- ASSERT_EQ(map_set_.length(), map_unique_ids_.length());
- HCheckMaps* b = HCheckMaps::cast(other);
- // Relies on the fact that map_set has been sorted before.
- if (map_unique_ids_.length() != b->map_unique_ids_.length()) {
- return false;
- }
- for (int i = 0; i < map_unique_ids_.length(); i++) {
- if (map_unique_ids_.at(i) != b->map_unique_ids_.at(i)) {
- return false;
- }
- }
- return true;
+ return this->map_set_.Equals(&HCheckMaps::cast(other)->map_set_);
}
virtual int RedefinedOperandIndex() { return 0; }
private:
void Add(Handle<Map> map, Zone* zone) {
- map_set_.Add(map, zone);
+ map_set_.Add(Unique<Map>(map), zone);
if (!has_migration_target_ && map->is_migration_target()) {
has_migration_target_ = true;
SetGVNFlag(kChangesNewSpacePromotion);
@@ -2609,10 +2698,9 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
// Clients should use one of the static New* methods above.
HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
: HTemplateInstruction<2>(value->type()),
- omit_(false), has_migration_target_(false), map_unique_ids_(0, zone) {
+ omit_(false), has_migration_target_(false) {
SetOperandAt(0, value);
// Use the object value for the dependency if NULL is passed.
- // TODO(titzer): do GVN flags already express this dependency?
SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
@@ -2621,36 +2709,33 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
SetGVNFlag(kDependsOnElementsKind);
}
- void omit(CompilationInfo* info) {
- omit_ = true;
- for (int i = 0; i < map_set_.length(); i++) {
- Handle<Map> map = map_set_.at(i);
- if (!map->CanTransition()) continue;
- map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
- info);
- }
- }
-
bool omit_;
bool has_migration_target_;
- SmallMapList map_set_;
- ZoneList<UniqueValueId> map_unique_ids_;
+ UniqueSet<Map> map_set_;
};
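
Illustrative sketch (not part of the upstream patch): with map_set_ now a UniqueSet<Map>, HandleSideEffectDominator reduces to a Contains() query and DataEquals() to an Equals() over captured addresses, replacing the deleted hand-rolled SmallMapList/ZoneList pairing. A minimal model of such a set, assuming sorted storage as the deleted code relied on:

    #include <algorithm>
    #include <vector>

    template <typename Id>
    class UniqueSetModel {
     public:
      void Add(Id id) {  // keep sorted: Contains is O(log n), Equals O(n)
        ids_.insert(std::upper_bound(ids_.begin(), ids_.end(), id), id);
      }
      bool Contains(Id id) const {
        return std::binary_search(ids_.begin(), ids_.end(), id);
      }
      bool Equals(const UniqueSetModel& other) const {
        return ids_ == other.ids_;
      }
     private:
      std::vector<Id> ids_;
    };

    int main() {
      UniqueSetModel<long> s;
      s.Add(0x1234); s.Add(0x10);
      return s.Contains(0x1234) ? 0 : 1;
    }
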
class HCheckValue V8_FINAL : public HUnaryOperation {
public:
static HCheckValue* New(Zone* zone, HValue* context,
- HValue* value, Handle<JSFunction> target) {
- bool in_new_space = zone->isolate()->heap()->InNewSpace(*target);
+ HValue* value, Handle<JSFunction> func) {
+ bool in_new_space = zone->isolate()->heap()->InNewSpace(*func);
+ // NOTE: We create an uninitialized Unique and initialize it later.
+ // This is because a JSFunction can move due to GC during graph creation.
+ // TODO(titzer): This is a migration crutch. Replace with some kind of
+ // Uniqueness scope later.
+ Unique<JSFunction> target = Unique<JSFunction>::CreateUninitialized(func);
HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
return check;
}
static HCheckValue* New(Zone* zone, HValue* context,
- HValue* value, Handle<Map> map, UniqueValueId id) {
- HCheckValue* check = new(zone) HCheckValue(value, map, false);
- check->object_unique_id_ = id;
- return check;
+ HValue* value, Unique<HeapObject> target,
+ bool object_in_new_space) {
+ return new(zone) HCheckValue(value, target, object_in_new_space);
+ }
+
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ object_ = Unique<HeapObject>(object_.handle());
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -2664,11 +2749,7 @@ class HCheckValue V8_FINAL : public HUnaryOperation {
virtual void Verify() V8_OVERRIDE;
#endif
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
- object_unique_id_ = UniqueValueId(object_);
- }
-
- Handle<HeapObject> object() const { return object_; }
+ Unique<HeapObject> object() const { return object_; }
bool object_in_new_space() const { return object_in_new_space_; }
DECLARE_CONCRETE_INSTRUCTION(CheckValue)
@@ -2676,38 +2757,35 @@ class HCheckValue V8_FINAL : public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HCheckValue* b = HCheckValue::cast(other);
- return object_unique_id_ == b->object_unique_id_;
+ return object_ == b->object_;
}
private:
- HCheckValue(HValue* value, Handle<HeapObject> object, bool in_new_space)
+ HCheckValue(HValue* value, Unique<HeapObject> object,
+ bool object_in_new_space)
: HUnaryOperation(value, value->type()),
- object_(object), object_in_new_space_(in_new_space) {
+ object_(object),
+ object_in_new_space_(object_in_new_space) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- Handle<HeapObject> object_;
- UniqueValueId object_unique_id_;
+ Unique<HeapObject> object_;
bool object_in_new_space_;
};
class HCheckInstanceType V8_FINAL : public HUnaryOperation {
public:
- static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT);
- }
- static HCheckInstanceType* NewIsJSArray(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_JS_ARRAY);
- }
- static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_STRING);
- }
- static HCheckInstanceType* NewIsInternalizedString(
- HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_INTERNALIZED_STRING);
- }
+ enum Check {
+ IS_SPEC_OBJECT,
+ IS_JS_ARRAY,
+ IS_STRING,
+ IS_INTERNALIZED_STRING,
+ LAST_INTERVAL_CHECK = IS_JS_ARRAY
+ };
+
+ DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -2735,14 +2813,6 @@ class HCheckInstanceType V8_FINAL : public HUnaryOperation {
virtual int RedefinedOperandIndex() { return 0; }
private:
- enum Check {
- IS_SPEC_OBJECT,
- IS_JS_ARRAY,
- IS_STRING,
- IS_INTERNALIZED_STRING,
- LAST_INTERVAL_CHECK = IS_JS_ARRAY
- };
-
const char* GetCheckName();
HCheckInstanceType(HValue* value, Check check)
@@ -2784,21 +2854,6 @@ class HCheckSmi V8_FINAL : public HUnaryOperation {
};
-class HIsNumberAndBranch V8_FINAL : public HUnaryControlInstruction {
- public:
- explicit HIsNumberAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) {
- SetFlag(kFlexibleRepresentation);
- }
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch)
-};
-
-
class HCheckHeapObject V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
@@ -3090,6 +3145,8 @@ class HPhi V8_FINAL : public HValue {
bool IsReceiver() const { return merged_index_ == 0; }
bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
+ virtual int position() const V8_OVERRIDE;
+
int merged_index() const { return merged_index_; }
InductionVariableData* induction_variable_data() {
@@ -3260,6 +3317,8 @@ class HCapturedObject V8_FINAL : public HDematerializedObject {
// Replay effects of this instruction on the given environment.
void ReplayEnvironment(HEnvironment* env);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
private:
@@ -3273,7 +3332,6 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, double);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Handle<Object>);
- DECLARE_INSTRUCTION_FACTORY_P2(HConstant, Handle<Map>, UniqueValueId);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, ExternalReference);
static HConstant* CreateAndInsertAfter(Zone* zone,
@@ -3298,16 +3356,27 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return new_constant;
}
+ static HConstant* CreateAndInsertBefore(Zone* zone,
+ Unique<Object> unique,
+ bool is_not_in_new_space,
+ HInstruction* instruction) {
+ HConstant* new_constant = new(zone) HConstant(unique,
+ Representation::Tagged(), HType::Tagged(), false, is_not_in_new_space,
+ false, false);
+ new_constant->InsertBefore(instruction);
+ return new_constant;
+ }
+
Handle<Object> handle(Isolate* isolate) {
- if (handle_.is_null()) {
- Factory* factory = isolate->factory();
+ if (object_.handle().is_null()) {
// Default arguments to is_not_in_new_space depend on this heap number
- // to be tenured so that it's guaranteed not be be located in new space.
- handle_ = factory->NewNumber(double_value_, TENURED);
+ // to be tenured so that it's guaranteed not to be located in new space.
+ object_ = Unique<Object>::CreateUninitialized(
+ isolate->factory()->NewNumber(double_value_, TENURED));
}
AllowDeferredHandleDereference smi_check;
- ASSERT(has_int32_value_ || !handle_->IsSmi());
- return handle_;
+ ASSERT(has_int32_value_ || !object_.handle()->IsSmi());
+ return object_.handle();
}
bool HasMap(Handle<Map> map) {
@@ -3341,16 +3410,18 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return false;
}
- ASSERT(!handle_.is_null());
+ ASSERT(!object_.handle().is_null());
Heap* heap = isolate()->heap();
- ASSERT(unique_id_ != UniqueValueId::minus_zero_value(heap));
- ASSERT(unique_id_ != UniqueValueId::nan_value(heap));
- return unique_id_ == UniqueValueId::undefined_value(heap) ||
- unique_id_ == UniqueValueId::null_value(heap) ||
- unique_id_ == UniqueValueId::true_value(heap) ||
- unique_id_ == UniqueValueId::false_value(heap) ||
- unique_id_ == UniqueValueId::the_hole_value(heap) ||
- unique_id_ == UniqueValueId::empty_string(heap);
+ ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value()));
+ ASSERT(!object_.IsKnownGlobal(heap->nan_value()));
+ return
+ object_.IsKnownGlobal(heap->undefined_value()) ||
+ object_.IsKnownGlobal(heap->null_value()) ||
+ object_.IsKnownGlobal(heap->true_value()) ||
+ object_.IsKnownGlobal(heap->false_value()) ||
+ object_.IsKnownGlobal(heap->the_hole_value()) ||
+ object_.IsKnownGlobal(heap->empty_string()) ||
+ object_.IsKnownGlobal(heap->empty_fixed_array());
}
bool IsCell() const {
@@ -3389,11 +3460,7 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
if (HasDoubleValue() && FixedDoubleArray::is_the_hole_nan(double_value_)) {
return true;
}
- Heap* heap = isolate()->heap();
- if (!handle_.is_null() && *handle_ == heap->the_hole_value()) {
- return true;
- }
- return false;
+ return object_.IsKnownGlobal(isolate()->heap()->the_hole_value());
}
bool HasNumberValue() const { return has_double_value_; }
int32_t NumberValueAsInteger32() const {
@@ -3405,12 +3472,12 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
}
bool HasStringValue() const {
if (has_double_value_ || has_int32_value_) return false;
- ASSERT(!handle_.is_null());
+ ASSERT(!object_.handle().is_null());
return type_.IsString();
}
Handle<String> StringValue() const {
ASSERT(HasStringValue());
- return Handle<String>::cast(handle_);
+ return Handle<String>::cast(object_.handle());
}
bool HasInternalizedStringValue() const {
return HasStringValue() && is_internalized_string_;
@@ -3434,21 +3501,20 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
} else if (has_external_reference_value_) {
return reinterpret_cast<intptr_t>(external_reference_value_.address());
} else {
- ASSERT(!handle_.is_null());
- return unique_id_.Hashcode();
+ ASSERT(!object_.handle().is_null());
+ return object_.Hashcode();
}
}
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
if (!has_double_value_ && !has_external_reference_value_) {
- ASSERT(!handle_.is_null());
- unique_id_ = UniqueValueId(handle_);
+ ASSERT(!object_.handle().is_null());
+ object_ = Unique<Object>(object_.handle());
}
}
- bool UniqueValueIdsMatch(UniqueValueId other) {
- return !has_double_value_ && !has_external_reference_value_ &&
- unique_id_ == other;
+ Unique<Object> GetUnique() const {
+ return object_;
}
#ifdef DEBUG
@@ -3474,9 +3540,13 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
external_reference_value_ ==
other_constant->external_reference_value_;
} else {
- ASSERT(!handle_.is_null());
- return !other_constant->handle_.is_null() &&
- unique_id_ == other_constant->unique_id_;
+ if (other_constant->has_int32_value_ ||
+ other_constant->has_double_value_ ||
+ other_constant->has_external_reference_value_) {
+ return false;
+ }
+ ASSERT(!object_.handle().is_null());
+ return other_constant->object_ == object_;
}
}
@@ -3486,33 +3556,30 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
HConstant(int32_t value,
Representation r = Representation::None(),
bool is_not_in_new_space = true,
- Handle<Object> optional_handle = Handle<Object>::null());
+ Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
HConstant(double value,
Representation r = Representation::None(),
bool is_not_in_new_space = true,
- Handle<Object> optional_handle = Handle<Object>::null());
- HConstant(Handle<Object> handle,
- UniqueValueId unique_id,
+ Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
+ HConstant(Unique<Object> unique,
Representation r,
HType type,
bool is_internalized_string,
bool is_not_in_new_space,
bool is_cell,
bool boolean_value);
- HConstant(Handle<Map> handle,
- UniqueValueId unique_id);
+
explicit HConstant(ExternalReference reference);
void Initialize(Representation r);
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
- // If this is a numerical constant, handle_ either points to to the
+ // If this is a numerical constant, object_ either points to the
// HeapObject the constant originated from or is null. If the
- // constant is non-numeric, handle_ always points to a valid
+ // constant is non-numeric, object_ always points to a valid
// constant HeapObject.
- Handle<Object> handle_;
- UniqueValueId unique_id_;
+ Unique<Object> object_;
// We store the HConstant in the most specific form safely possible.
// The two flags, has_int32_value_ and has_double_value_ tell us if
@@ -3649,17 +3716,8 @@ class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
class HApplyArguments V8_FINAL : public HTemplateInstruction<4> {
public:
- HApplyArguments(HValue* function,
- HValue* receiver,
- HValue* length,
- HValue* elements) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, function);
- SetOperandAt(1, receiver);
- SetOperandAt(2, length);
- SetOperandAt(3, elements);
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_FACTORY_P4(HApplyArguments, HValue*, HValue*, HValue*,
+ HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// The length is untagged, all other inputs are tagged.
@@ -3674,6 +3732,19 @@ class HApplyArguments V8_FINAL : public HTemplateInstruction<4> {
HValue* elements() { return OperandAt(3); }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
+
+ private:
+ HApplyArguments(HValue* function,
+ HValue* receiver,
+ HValue* length,
+ HValue* elements) {
+ set_representation(Representation::Tagged());
+ SetOperandAt(0, function);
+ SetOperandAt(1, receiver);
+ SetOperandAt(2, length);
+ SetOperandAt(3, elements);
+ SetAllSideEffects();
+ }
};
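
The recurring change in this and the following hunks is mechanical: each instruction's public constructor moves to a private section, and a DECLARE_INSTRUCTION_FACTORY_Pn (or DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_Pn) macro declares the static, zone-allocating New() entry point. The hand-written New() methods visible in this diff (the one added for HRor, and the ones being replaced on HMathFloorOfDiv and HStringCharCodeAt) show the shape these macros generate; a plausible expansion of the four-argument variant, modeled on that shape and not copied from the real header, is:

    // Plausible expansion of DECLARE_INSTRUCTION_FACTORY_P4, modeled on the
    // explicit New() methods elsewhere in this diff; the real macro may
    // differ in detail.
    #define DECLARE_INSTRUCTION_FACTORY_P4(I, P1, P2, P3, P4)   \
      static I* New(Zone* zone, P1 p1, P2 p2, P3 p3, P4 p4) {   \
        return new(zone) I(p1, p2, p3, p4);                     \
      }

    // Call sites then read:
    //   HApplyArguments* apply =
    //       HApplyArguments::New(zone, function, receiver, length, elements);
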
@@ -3731,13 +3802,7 @@ class HArgumentsLength V8_FINAL : public HUnaryOperation {
class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> {
public:
- HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetOperandAt(0, arguments);
- SetOperandAt(1, length);
- SetOperandAt(2, index);
- }
+ DECLARE_INSTRUCTION_FACTORY_P3(HAccessArgumentsAt, HValue*, HValue*, HValue*);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -3754,6 +3819,15 @@ class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
+ private:
+ HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetOperandAt(0, arguments);
+ SetOperandAt(1, length);
+ SetOperandAt(2, index);
+ }
+
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
};
@@ -3882,13 +3956,14 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (!to.IsTagged()) {
- ASSERT(to.IsSmiOrInteger32());
- ClearAllSideEffects();
- SetFlag(kUseGVN);
- } else {
+ if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged() &&
+ (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
ClearFlag(kUseGVN);
+ } else {
+ ClearAllSideEffects();
+ SetFlag(kUseGVN);
}
}
@@ -3920,12 +3995,9 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
public:
- static HMathFloorOfDiv* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
- return new(zone) HMathFloorOfDiv(context, left, right);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HMathFloorOfDiv,
+ HValue*,
+ HValue*);
virtual HValue* EnsureAndPropagateNotMinusZero(
BitVector* visited) V8_OVERRIDE;
@@ -3961,7 +4033,9 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) {
+ if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged() &&
+ (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
ClearFlag(kUseGVN);
} else {
@@ -3971,7 +4045,6 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
}
DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
-
private:
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -3979,16 +4052,8 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
class HCompareGeneric V8_FINAL : public HBinaryOperation {
public:
- HCompareGeneric(HValue* context,
- HValue* left,
- HValue* right,
- Token::Value token)
- : HBinaryOperation(context, left, right, HType::Boolean()),
- token_(token) {
- ASSERT(Token::IsCompareOp(token));
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCompareGeneric, HValue*,
+ HValue*, Token::Value);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return index == 0
@@ -4002,19 +4067,28 @@ class HCompareGeneric V8_FINAL : public HBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
private:
+ HCompareGeneric(HValue* context,
+ HValue* left,
+ HValue* right,
+ Token::Value token)
+ : HBinaryOperation(context, left, right, HType::Boolean()),
+ token_(token) {
+ ASSERT(Token::IsCompareOp(token));
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Token::Value token_;
};
class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
public:
- HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token)
- : token_(token) {
- SetFlag(kFlexibleRepresentation);
- ASSERT(Token::IsCompareOp(token));
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- }
+ DECLARE_INSTRUCTION_FACTORY_P3(HCompareNumericAndBranch,
+ HValue*, HValue*, Token::Value);
+ DECLARE_INSTRUCTION_FACTORY_P5(HCompareNumericAndBranch,
+ HValue*, HValue*, Token::Value,
+ HBasicBlock*, HBasicBlock*);
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
@@ -4040,25 +4114,30 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
private:
+ HCompareNumericAndBranch(HValue* left,
+ HValue* right,
+ Token::Value token,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : token_(token) {
+ SetFlag(kFlexibleRepresentation);
+ ASSERT(Token::IsCompareOp(token));
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
+ SetSuccessorAt(0, true_target);
+ SetSuccessorAt(1, false_target);
+ }
+
Representation observed_input_representation_[2];
Token::Value token_;
};
-class HCompareHoleAndBranch V8_FINAL
- : public HTemplateControlInstruction<2, 1> {
+class HCompareHoleAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- // TODO(danno): make this private when the IfBuilder properly constructs
- // control flow instructions.
- explicit HCompareHoleAndBranch(HValue* object) {
- SetFlag(kFlexibleRepresentation);
- SetFlag(kAllowUndefinedAsNaN);
- SetOperandAt(0, object);
- }
-
DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*);
-
- HValue* object() { return OperandAt(0); }
+ DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
virtual void InferRepresentation(
HInferRepresentationPhase* h_infer) V8_OVERRIDE;
@@ -4067,23 +4146,44 @@ class HCompareHoleAndBranch V8_FINAL
return representation();
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch)
+
+ private:
+ HCompareHoleAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {
+ SetFlag(kFlexibleRepresentation);
+ SetFlag(kAllowUndefinedAsNaN);
+ }
};
class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
public:
- // TODO(danno): make this private when the IfBuilder properly constructs
- // control flow instructions.
HCompareObjectEqAndBranch(HValue* left,
- HValue* right) {
+ HValue* right,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL) {
+ // TODO(danno): make this private when the IfBuilder properly constructs
+ // control flow instructions.
+ ASSERT(!left->IsConstant() ||
+ (!HConstant::cast(left)->HasInteger32Value() ||
+ HConstant::cast(left)->HasSmiValue()));
+ ASSERT(!right->IsConstant() ||
+ (!HConstant::cast(right)->HasInteger32Value() ||
+ HConstant::cast(right)->HasSmiValue()));
SetOperandAt(0, left);
SetOperandAt(1, right);
+ SetSuccessorAt(0, true_target);
+ SetSuccessorAt(1, false_target);
}
DECLARE_INSTRUCTION_FACTORY_P2(HCompareObjectEqAndBranch, HValue*, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*,
+ HBasicBlock*, HBasicBlock*);
+
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
@@ -4104,33 +4204,49 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsObjectAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsObjectAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsObjectAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
+
+ private:
+ HIsObjectAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
+
class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsStringAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsStringAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsStringAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
+
+ private:
+ HIsStringAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsSmiAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsSmiAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsSmiAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
@@ -4140,36 +4256,41 @@ class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HIsSmiAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsUndetectableAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsUndetectableAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsUndetectableAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
+
+ private:
+ HIsUndetectableAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
public:
- HStringCompareAndBranch(HValue* context,
- HValue* left,
- HValue* right,
- Token::Value token)
- : token_(token) {
- ASSERT(Token::IsCompareOp(token));
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- SetOperandAt(2, right);
- set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HStringCompareAndBranch,
+ HValue*,
+ HValue*,
+ Token::Value);
HValue* context() { return OperandAt(0); }
HValue* left() { return OperandAt(1); }
@@ -4189,28 +4310,43 @@ class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
private:
+ HStringCompareAndBranch(HValue* context,
+ HValue* left,
+ HValue* right,
+ Token::Value token)
+ : token_(token) {
+ ASSERT(Token::IsCompareOp(token));
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
+ SetOperandAt(2, right);
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
Token::Value token_;
};
class HIsConstructCallAndBranch : public HTemplateControlInstruction<2, 0> {
public:
+ DECLARE_INSTRUCTION_FACTORY_P0(HIsConstructCallAndBranch);
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch)
+ private:
+ HIsConstructCallAndBranch() {}
};
class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
- : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
- HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
- : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
- ASSERT(to == LAST_TYPE); // Others not implemented yet in backend.
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(
+ HHasInstanceTypeAndBranch, HValue*, InstanceType);
+ DECLARE_INSTRUCTION_FACTORY_P3(
+ HHasInstanceTypeAndBranch, HValue*, InstanceType, InstanceType);
InstanceType from() { return from_; }
InstanceType to() { return to_; }
@@ -4224,6 +4360,13 @@ class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
private:
+ HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
+ : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
+ HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
+ : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
+ ASSERT(to == LAST_TYPE); // Others not implemented yet in backend.
+ }
+
InstanceType from_;
InstanceType to_; // Inclusive range, not all combinations work.
};
@@ -4231,23 +4374,22 @@ class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
class HHasCachedArrayIndexAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HHasCachedArrayIndexAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HHasCachedArrayIndexAndBranch, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch)
+ private:
+ explicit HHasCachedArrayIndexAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
};
class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation {
public:
- explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HGetCachedArrayIndex, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -4259,15 +4401,19 @@ class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation {
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
+ explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
- : HUnaryControlInstruction(value, NULL, NULL),
- class_name_(class_name) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HClassOfTestAndBranch, HValue*,
+ Handle<String>);
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
@@ -4280,15 +4426,17 @@ class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction {
Handle<String> class_name() const { return class_name_; }
private:
+ HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
+ : HUnaryControlInstruction(value, NULL, NULL),
+ class_name_(class_name) { }
+
Handle<String> class_name_;
};
class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
- : HUnaryControlInstruction(value, NULL, NULL),
- type_literal_(type_literal) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
Handle<String> type_literal() { return type_literal_; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -4300,17 +4448,17 @@ class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
}
private:
+ HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
+ : HUnaryControlInstruction(value, NULL, NULL),
+ type_literal_(type_literal) { }
+
Handle<String> type_literal_;
};
class HInstanceOf V8_FINAL : public HBinaryOperation {
public:
- HInstanceOf(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right, HType::Boolean()) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOf, HValue*, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -4319,20 +4467,21 @@ class HInstanceOf V8_FINAL : public HBinaryOperation {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
+
+ private:
+ HInstanceOf(HValue* context, HValue* left, HValue* right)
+ : HBinaryOperation(context, left, right, HType::Boolean()) {
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
};
class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
public:
- HInstanceOfKnownGlobal(HValue* context,
- HValue* left,
- Handle<JSFunction> right)
- : HTemplateInstruction<2>(HType::Boolean()), function_(right) {
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOfKnownGlobal,
+ HValue*,
+ Handle<JSFunction>);
HValue* context() { return OperandAt(0); }
HValue* left() { return OperandAt(1); }
@@ -4345,27 +4494,17 @@ class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal)
private:
- Handle<JSFunction> function_;
-};
-
-
-// TODO(mstarzinger): This instruction should be modeled as a load of the map
-// field followed by a load of the instance size field once HLoadNamedField is
-// flexible enough to accommodate byte-field loads.
-class HInstanceSize V8_FINAL : public HTemplateInstruction<1> {
- public:
- explicit HInstanceSize(HValue* object) {
- SetOperandAt(0, object);
- set_representation(Representation::Integer32());
- }
-
- HValue* object() { return OperandAt(0); }
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
+ HInstanceOfKnownGlobal(HValue* context,
+ HValue* left,
+ Handle<JSFunction> right)
+ : HTemplateInstruction<2>(HType::Boolean()), function_(right) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
}
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize)
+ Handle<JSFunction> function_;
};
@@ -4410,10 +4549,7 @@ class HPower V8_FINAL : public HTemplateInstruction<2> {
class HRandom V8_FINAL : public HTemplateInstruction<1> {
public:
- explicit HRandom(HValue* global_object) {
- SetOperandAt(0, global_object);
- set_representation(Representation::Double());
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HRandom, HValue*);
HValue* global_object() { return OperandAt(0); }
@@ -4424,6 +4560,11 @@ class HRandom V8_FINAL : public HTemplateInstruction<1> {
DECLARE_CONCRETE_INSTRUCTION(Random)
private:
+ explicit HRandom(HValue* global_object) {
+ SetOperandAt(0, global_object);
+ set_representation(Representation::Double());
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -4459,8 +4600,19 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) ClearFlag(kAllowUndefinedAsNaN);
- HArithmeticBinaryOperation::RepresentationChanged(to);
+ if (to.IsTagged()) {
+ SetGVNFlag(kChangesNewSpacePromotion);
+ ClearFlag(kAllowUndefinedAsNaN);
+ }
+ if (to.IsTagged() &&
+ (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
+ left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
+ } else {
+ ClearAllSideEffects();
+ SetFlag(kUseGVN);
+ }
}
DECLARE_CONCRETE_INSTRUCTION(Add)
@@ -4522,10 +4674,12 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation {
HValue* right);
static HInstruction* NewImul(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
- HMul* mul = new(zone) HMul(context, left, right);
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ HInstruction* instr = HMul::New(zone, context, left, right);
+ if (!instr->IsMul()) return instr;
+ HMul* mul = HMul::cast(instr);
// TODO(mstarzinger): Prevent bailout on minus zero for imul.
mul->AssumeRepresentation(Representation::Integer32());
mul->ClearFlag(HValue::kCanOverflow);
@@ -4548,6 +4702,8 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation {
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
+ bool MulMinusOne();
+
DECLARE_CONCRETE_INSTRUCTION(Mul)
protected:
@@ -4884,9 +5040,11 @@ class HSar V8_FINAL : public HBitwiseBinaryOperation {
class HRor V8_FINAL : public HBitwiseBinaryOperation {
public:
- HRor(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) {
- ChangeRepresentation(Representation::Integer32());
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ return new(zone) HRor(context, left, right);
}
virtual void UpdateRepresentation(Representation new_rep,
@@ -4900,6 +5058,12 @@ class HRor V8_FINAL : public HBitwiseBinaryOperation {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HRor(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) {
+ ChangeRepresentation(Representation::Integer32());
+ }
};
@@ -4971,12 +5135,7 @@ class HParameter V8_FINAL : public HTemplateInstruction<0> {
class HCallStub V8_FINAL : public HUnaryCall {
public:
- HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
- : HUnaryCall(context, argument_count),
- major_key_(major_key),
- transcendental_type_(TranscendentalCache::kNumberOfCaches) {
- }
-
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallStub, CodeStub::Major, int);
CodeStub::Major major_key() { return major_key_; }
HValue* context() { return value(); }
@@ -4993,6 +5152,12 @@ class HCallStub V8_FINAL : public HUnaryCall {
DECLARE_CONCRETE_INSTRUCTION(CallStub)
private:
+ HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
+ : HUnaryCall(context, argument_count),
+ major_key_(major_key),
+ transcendental_type_(TranscendentalCache::kNumberOfCaches) {
+ }
+
CodeStub::Major major_key_;
TranscendentalCache::Type transcendental_type_;
};
@@ -5036,24 +5201,20 @@ class HUnknownOSRValue V8_FINAL : public HTemplateInstruction<0> {
class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
public:
- HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details)
- : cell_(cell), details_(details), unique_id_() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnGlobalVars);
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HLoadGlobalCell, Handle<Cell>,
+ PropertyDetails);
- Handle<Cell> cell() const { return cell_; }
+ Unique<Cell> cell() const { return cell_; }
bool RequiresHoleCheck() const;
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual intptr_t Hashcode() V8_OVERRIDE {
- return unique_id_.Hashcode();
+ return cell_.Hashcode();
}
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
- unique_id_ = UniqueValueId(cell_);
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ cell_ = Unique<Cell>(cell_.handle());
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -5064,32 +5225,28 @@ class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
- HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
- return unique_id_ == b->unique_id_;
+ return cell_ == HLoadGlobalCell::cast(other)->cell_;
}
private:
+ HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details)
+ : cell_(Unique<Cell>::CreateUninitialized(cell)), details_(details) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnGlobalVars);
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
- Handle<Cell> cell_;
+ Unique<Cell> cell_;
PropertyDetails details_;
- UniqueValueId unique_id_;
};
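
HLoadGlobalCell is also a clean example of the two-phase Unique&lt;T&gt; protocol that replaces UniqueValueId throughout this diff: the cell is wrapped with Unique&lt;Cell&gt;::CreateUninitialized() at graph-construction time, when dereferencing handles is not allowed, and FinalizeUniqueness() later rebuilds the wrapper once it is safe to read the object's address for Hashcode() and GVN equality. A rough model of that protocol, with a member layout assumed purely for illustration (not the actual unique.h definition):

    // Rough model of Unique<T>'s two phases; the fields are assumptions.
    template <typename T>
    class UniqueSketch {
     public:
      // Phase 1: graph building. Wrap the handle without touching the object.
      static UniqueSketch CreateUninitialized(Handle<T> h) {
        return UniqueSketch(NULL, h);
      }
      // Phase 2: FinalizeUniqueness(). Dereference once to capture identity.
      explicit UniqueSketch(Handle<T> h)
          : raw_address_(reinterpret_cast<void*>(*h)), handle_(h) {}

      intptr_t Hashcode() const {  // Meaningful only after phase 2.
        return reinterpret_cast<intptr_t>(raw_address_);
      }
      Handle<T> handle() const { return handle_; }

     private:
      UniqueSketch(void* addr, Handle<T> h)
          : raw_address_(addr), handle_(h) {}
      void* raw_address_;  // NULL until finalized.
      Handle<T> handle_;
    };
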
class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
public:
- HLoadGlobalGeneric(HValue* context,
- HValue* global_object,
- Handle<Object> name,
- bool for_typeof)
- : name_(name),
- for_typeof_(for_typeof) {
- SetOperandAt(0, context);
- SetOperandAt(1, global_object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*,
+ Handle<Object>, bool);
HValue* context() { return OperandAt(0); }
HValue* global_object() { return OperandAt(1); }
@@ -5105,6 +5262,18 @@ class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
private:
+ HLoadGlobalGeneric(HValue* context,
+ HValue* global_object,
+ Handle<Object> name,
+ bool for_typeof)
+ : name_(name),
+ for_typeof_(for_typeof) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, global_object);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Handle<Object> name_;
bool for_typeof_;
};
@@ -5344,7 +5513,7 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*,
Handle<PropertyCell>, PropertyDetails);
- Handle<PropertyCell> cell() const { return cell_; }
+ Unique<PropertyCell> cell() const { return cell_; }
bool RequiresHoleCheck() {
return !details_.IsDontDelete() || details_.IsReadOnly();
}
@@ -5352,6 +5521,10 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
return StoringValueNeedsWriteBarrier(value());
}
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ cell_ = Unique<PropertyCell>(cell_.handle());
+ }
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -5364,12 +5537,12 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
Handle<PropertyCell> cell,
PropertyDetails details)
: HUnaryOperation(value),
- cell_(cell),
+ cell_(Unique<PropertyCell>::CreateUninitialized(cell)),
details_(details) {
SetGVNFlag(kChangesGlobalVars);
}
- Handle<PropertyCell> cell_;
+ Unique<PropertyCell> cell_;
PropertyDetails details_;
};
@@ -5580,6 +5753,18 @@ class HObjectAccess V8_FINAL {
kDouble, HeapNumber::kValueOffset, Representation::Double());
}
+ static HObjectAccess ForHeapNumberValueLowestBits() {
+ return HObjectAccess(kDouble,
+ HeapNumber::kValueOffset,
+ Representation::Integer32());
+ }
+
+ static HObjectAccess ForHeapNumberValueHighestBits() {
+ return HObjectAccess(kDouble,
+ HeapNumber::kValueOffset + kIntSize,
+ Representation::Integer32());
+ }
+
static HObjectAccess ForElementsPointer() {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
@@ -5601,12 +5786,9 @@ class HObjectAccess V8_FINAL {
? Representation::Smi() : Representation::Tagged());
}
- static HObjectAccess ForAllocationSiteTransitionInfo() {
- return HObjectAccess(kInobject, AllocationSite::kTransitionInfoOffset);
- }
-
- static HObjectAccess ForAllocationSiteWeakNext() {
- return HObjectAccess(kInobject, AllocationSite::kWeakNextOffset);
+ static HObjectAccess ForAllocationSiteOffset(int offset) {
+ ASSERT(offset >= HeapObject::kHeaderSize && offset < AllocationSite::kSize);
+ return HObjectAccess(kInobject, offset);
}
static HObjectAccess ForAllocationSiteList() {
@@ -5669,6 +5851,12 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(kMaps, JSObject::kMapOffset);
}
+ static HObjectAccess ForMapInstanceSize() {
+ return HObjectAccess(kInobject,
+ Map::kInstanceSizeOffset,
+ Representation::Byte());
+ }
+
static HObjectAccess ForPropertyCellValue() {
return HObjectAccess(kInobject, PropertyCell::kValueOffset);
}
@@ -5798,7 +5986,9 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
SetOperandAt(0, object);
Representation representation = access.representation();
- if (representation.IsSmi()) {
+ if (representation.IsByte()) {
+ set_representation(Representation::Integer32());
+ } else if (representation.IsSmi()) {
set_type(HType::Smi());
set_representation(representation);
} else if (representation.IsDouble() ||
@@ -5823,13 +6013,8 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> {
public:
- HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
- : name_(name) {
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadNamedGeneric, HValue*,
+ Handle<Object>);
HValue* context() { return OperandAt(0); }
HValue* object() { return OperandAt(1); }
@@ -5844,18 +6029,21 @@ class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
private:
+ HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
+ : name_(name) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, object);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Handle<Object> name_;
};
class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
public:
- explicit HLoadFunctionPrototype(HValue* function)
- : HUnaryOperation(function) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnCalls);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HLoadFunctionPrototype, HValue*);
HValue* function() { return OperandAt(0); }
@@ -5867,6 +6055,14 @@ class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ explicit HLoadFunctionPrototype(HValue* function)
+ : HUnaryOperation(function) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnCalls);
+ }
};
class ArrayInstructionInterface {
@@ -6054,14 +6250,8 @@ class HLoadKeyed V8_FINAL
class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
- HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, context);
- SetAllSideEffects();
- }
-
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadKeyedGeneric, HValue*,
+ HValue*);
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
@@ -6076,6 +6266,15 @@ class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
virtual HValue* Canonicalize() V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
+
+ private:
+ HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
+ set_representation(Representation::Tagged());
+ SetOperandAt(0, obj);
+ SetOperandAt(1, key);
+ SetOperandAt(2, context);
+ SetAllSideEffects();
+ }
};
@@ -6096,11 +6295,14 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
if (index == 0 && access().IsExternalMemory()) {
// object must be external in case of external memory access
return Representation::External();
- } else if (index == 1 &&
- (field_representation().IsDouble() ||
- field_representation().IsSmi() ||
- field_representation().IsInteger32())) {
- return field_representation();
+ } else if (index == 1) {
+ if (field_representation().IsByte() ||
+ field_representation().IsInteger32()) {
+ return Representation::Integer32();
+ } else if (field_representation().IsDouble() ||
+ field_representation().IsSmi()) {
+ return field_representation();
+ }
}
return Representation::Tagged();
}
@@ -6191,19 +6393,9 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
- HStoreNamedGeneric(HValue* context,
- HValue* object,
- Handle<String> name,
- HValue* value,
- StrictModeFlag strict_mode_flag)
- : name_(name),
- strict_mode_flag_(strict_mode_flag) {
- SetOperandAt(0, object);
- SetOperandAt(1, value);
- SetOperandAt(2, context);
- SetAllSideEffects();
- }
-
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*,
+ Handle<String>, HValue*,
+ StrictModeFlag);
HValue* object() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
@@ -6219,6 +6411,19 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
private:
+ HStoreNamedGeneric(HValue* context,
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ StrictModeFlag strict_mode_flag)
+ : name_(name),
+ strict_mode_flag_(strict_mode_flag) {
+ SetOperandAt(0, object);
+ SetOperandAt(1, value);
+ SetOperandAt(2, context);
+ SetAllSideEffects();
+ }
+
Handle<String> name_;
StrictModeFlag strict_mode_flag_;
};
@@ -6367,18 +6572,8 @@ class HStoreKeyed V8_FINAL
class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
public:
- HStoreKeyedGeneric(HValue* context,
- HValue* object,
- HValue* key,
- HValue* value,
- StrictModeFlag strict_mode_flag)
- : strict_mode_flag_(strict_mode_flag) {
- SetOperandAt(0, object);
- SetOperandAt(1, key);
- SetOperandAt(2, value);
- SetOperandAt(3, context);
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*,
+ HValue*, HValue*, StrictModeFlag);
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
@@ -6396,6 +6591,19 @@ class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
private:
+ HStoreKeyedGeneric(HValue* context,
+ HValue* object,
+ HValue* key,
+ HValue* value,
+ StrictModeFlag strict_mode_flag)
+ : strict_mode_flag_(strict_mode_flag) {
+ SetOperandAt(0, object);
+ SetOperandAt(1, key);
+ SetOperandAt(2, value);
+ SetOperandAt(3, context);
+ SetAllSideEffects();
+ }
+
StrictModeFlag strict_mode_flag_;
};
@@ -6417,25 +6625,20 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
HValue* object() { return OperandAt(0); }
HValue* context() { return OperandAt(1); }
- Handle<Map> original_map() { return original_map_; }
- Handle<Map> transitioned_map() { return transitioned_map_; }
+ Unique<Map> original_map() { return original_map_; }
+ Unique<Map> transitioned_map() { return transitioned_map_; }
ElementsKind from_kind() { return from_kind_; }
ElementsKind to_kind() { return to_kind_; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
- original_map_unique_id_ = UniqueValueId(original_map_);
- transitioned_map_unique_id_ = UniqueValueId(transitioned_map_);
- }
-
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
- return original_map_unique_id_ == instr->original_map_unique_id_ &&
- transitioned_map_unique_id_ == instr->transitioned_map_unique_id_;
+ return original_map_ == instr->original_map_ &&
+ transitioned_map_ == instr->transitioned_map_;
}
private:
@@ -6443,10 +6646,8 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
HValue* object,
Handle<Map> original_map,
Handle<Map> transitioned_map)
- : original_map_(original_map),
- transitioned_map_(transitioned_map),
- original_map_unique_id_(),
- transitioned_map_unique_id_(),
+ : original_map_(Unique<Map>(original_map)),
+ transitioned_map_(Unique<Map>(transitioned_map)),
from_kind_(original_map->elements_kind()),
to_kind_(transitioned_map->elements_kind()) {
SetOperandAt(0, object);
@@ -6460,10 +6661,8 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
}
- Handle<Map> original_map_;
- Handle<Map> transitioned_map_;
- UniqueValueId original_map_unique_id_;
- UniqueValueId transitioned_map_unique_id_;
+ Unique<Map> original_map_;
+ Unique<Map> transitioned_map_;
ElementsKind from_kind_;
ElementsKind to_kind_;
};
@@ -6492,14 +6691,26 @@ class HStringAdd V8_FINAL : public HBinaryOperation {
HStringAdd(HValue* context, HValue* left, HValue* right, StringAddFlags flags)
: HBinaryOperation(context, left, right, HType::String()), flags_(flags) {
set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
+ if (MightHaveSideEffects()) {
+ SetAllSideEffects();
+ } else {
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
}
- // No side-effects except possible allocation.
- // NOTE: this instruction _does not_ call ToString() on its inputs.
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ bool MightHaveSideEffects() const {
+ return flags_ != STRING_ADD_CHECK_NONE &&
+ (left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved());
+ }
+
+ // No side-effects except possible allocation:
+ // NOTE: this instruction does not call ToString() on its inputs when flags_
+ // is set to STRING_ADD_CHECK_NONE.
+ virtual bool IsDeletable() const V8_OVERRIDE {
+ return !MightHaveSideEffects();
+ }
const StringAddFlags flags_;
};
@@ -6507,12 +6718,9 @@ class HStringAdd V8_FINAL : public HBinaryOperation {
class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
public:
- static HStringCharCodeAt* New(Zone* zone,
- HValue* context,
- HValue* string,
- HValue* index) {
- return new(zone) HStringCharCodeAt(context, string, index);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HStringCharCodeAt,
+ HValue*,
+ HValue*);
virtual Representation RequiredInputRepresentation(int index) {
// The index is supposed to be Integer32.
@@ -6616,6 +6824,24 @@ class HMaterializedLiteral : public HTemplateInstruction<V> {
class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
public:
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HRegExpLiteral,
+ Handle<FixedArray>,
+ Handle<String>,
+ Handle<String>,
+ int);
+
+ HValue* context() { return OperandAt(0); }
+ Handle<FixedArray> literals() { return literals_; }
+ Handle<String> pattern() { return pattern_; }
+ Handle<String> flags() { return flags_; }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
+
+ private:
HRegExpLiteral(HValue* context,
Handle<FixedArray> literals,
Handle<String> pattern,
@@ -6630,18 +6856,6 @@ class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
set_type(HType::JSObject());
}
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> literals() { return literals_; }
- Handle<String> pattern() { return pattern_; }
- Handle<String> flags() { return flags_; }
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
-
- private:
Handle<FixedArray> literals_;
Handle<String> pattern_;
Handle<String> flags_;
@@ -6650,20 +6864,9 @@ class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
public:
- HFunctionLiteral(HValue* context,
- Handle<SharedFunctionInfo> shared,
- bool pretenure)
- : HTemplateInstruction<1>(HType::JSObject()),
- shared_info_(shared),
- pretenure_(pretenure),
- has_no_literals_(shared->num_literals() == 0),
- is_generator_(shared->is_generator()),
- language_mode_(shared->language_mode()) {
- SetOperandAt(0, context);
- set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HFunctionLiteral,
+ Handle<SharedFunctionInfo>,
+ bool);
HValue* context() { return OperandAt(0); }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -6679,6 +6882,20 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
LanguageMode language_mode() const { return language_mode_; }
private:
+ HFunctionLiteral(HValue* context,
+ Handle<SharedFunctionInfo> shared,
+ bool pretenure)
+ : HTemplateInstruction<1>(HType::JSObject()),
+ shared_info_(shared),
+ pretenure_(pretenure),
+ has_no_literals_(shared->num_literals() == 0),
+ is_generator_(shared->is_generator()),
+ language_mode_(shared->language_mode()) {
+ SetOperandAt(0, context);
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
Handle<SharedFunctionInfo> shared_info_;
@@ -6691,11 +6908,7 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
class HTypeof V8_FINAL : public HTemplateInstruction<2> {
public:
- explicit HTypeof(HValue* context, HValue* value) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*);
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
@@ -6709,6 +6922,12 @@ class HTypeof V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(Typeof)
private:
+ explicit HTypeof(HValue* context, HValue* value) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
+ set_representation(Representation::Tagged());
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -6753,8 +6972,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
ASSERT(value->IsCallRuntime());
#ifdef DEBUG
const Runtime::Function* function = HCallRuntime::cast(value)->function();
- ASSERT(function->function_id == Runtime::kCreateObjectLiteral ||
- function->function_id == Runtime::kCreateObjectLiteralShallow);
+ ASSERT(function->function_id == Runtime::kCreateObjectLiteral);
#endif
}
@@ -6764,9 +6982,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
class HValueOf V8_FINAL : public HUnaryOperation {
public:
- explicit HValueOf(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HValueOf, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -6775,16 +6991,17 @@ class HValueOf V8_FINAL : public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(ValueOf)
private:
+ explicit HValueOf(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Tagged());
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
class HDateField V8_FINAL : public HUnaryOperation {
public:
- HDateField(HValue* date, Smi* index)
- : HUnaryOperation(date), index_(index) {
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HDateField, HValue*, Smi*);
Smi* index() const { return index_; }
@@ -6795,21 +7012,19 @@ class HDateField V8_FINAL : public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(DateField)
private:
+ HDateField(HValue* date, Smi* index)
+ : HUnaryOperation(date), index_(index) {
+ set_representation(Representation::Tagged());
+ }
+
Smi* index_;
};
class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<3> {
public:
- HSeqStringSetChar(String::Encoding encoding,
- HValue* string,
- HValue* index,
- HValue* value) : encoding_(encoding) {
- SetOperandAt(0, string);
- SetOperandAt(1, index);
- SetOperandAt(2, value);
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_FACTORY_P4(HSeqStringSetChar, String::Encoding,
+ HValue*, HValue*, HValue*);
String::Encoding encoding() { return encoding_; }
HValue* string() { return OperandAt(0); }
@@ -6824,6 +7039,16 @@ class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar)
private:
+ HSeqStringSetChar(String::Encoding encoding,
+ HValue* string,
+ HValue* index,
+ HValue* value) : encoding_(encoding) {
+ SetOperandAt(0, string);
+ SetOperandAt(1, index);
+ SetOperandAt(2, value);
+ set_representation(Representation::Tagged());
+ }
+
String::Encoding encoding_;
};
@@ -6867,11 +7092,7 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
class HForInPrepareMap V8_FINAL : public HTemplateInstruction<2> {
public:
- static HForInPrepareMap* New(Zone* zone,
- HValue* context,
- HValue* object) {
- return new(zone) HForInPrepareMap(context, object);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HForInPrepareMap, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
diff --git a/deps/v8/src/hydrogen-load-elimination.cc b/deps/v8/src/hydrogen-load-elimination.cc
new file mode 100644
index 0000000000..3337188f9a
--- /dev/null
+++ b/deps/v8/src/hydrogen-load-elimination.cc
@@ -0,0 +1,510 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-alias-analysis.h"
+#include "hydrogen-load-elimination.h"
+#include "hydrogen-instructions.h"
+#include "hydrogen-flow-engine.h"
+
+namespace v8 {
+namespace internal {
+
+#define GLOBAL true
+#define TRACE(x) if (FLAG_trace_load_elimination) PrintF x
+
+static const int kMaxTrackedFields = 16;
+static const int kMaxTrackedObjects = 5;
+
+// An element in the field approximation list.
+class HFieldApproximation : public ZoneObject {
+ public: // Just a data blob.
+ HValue* object_;
+ HLoadNamedField* last_load_;
+ HValue* last_value_;
+ HFieldApproximation* next_;
+
+ // Recursively copy the entire linked list of field approximations.
+ HFieldApproximation* Copy(Zone* zone) {
+ if (this == NULL) return NULL;
+ HFieldApproximation* copy = new(zone) HFieldApproximation();
+ copy->object_ = this->object_;
+ copy->last_load_ = this->last_load_;
+ copy->last_value_ = this->last_value_;
+ copy->next_ = this->next_->Copy(zone);
+ return copy;
+ }
+};
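
A way to picture the state the table below maintains: fields_ is indexed by in-object field number, and each slot heads a singly linked list of at most kMaxTrackedObjects approximations, one per tracked object. Illustratively, with object and value ids invented:

    // fields_[0] -> {object_: o1, last_value_: v5} -> {object_: o2, last_value_: v8} -> NULL
    // fields_[1] -> {object_: o1, last_value_: v9} -> NULL
    // fields_[2] -> NULL   // nothing known about field 2 of any object
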
+
+
+// The main data structure used during load/store elimination. Each in-object
+// field is tracked separately. For each field, store a list of known field
+// values for known objects.
+class HLoadEliminationTable : public ZoneObject {
+ public:
+ HLoadEliminationTable(Zone* zone, HAliasAnalyzer* aliasing)
+ : zone_(zone), fields_(kMaxTrackedFields, zone), aliasing_(aliasing) { }
+
+ // The main processing of instructions.
+ HLoadEliminationTable* Process(HInstruction* instr, Zone* zone) {
+ switch (instr->opcode()) {
+ case HValue::kLoadNamedField: {
+ HLoadNamedField* l = HLoadNamedField::cast(instr);
+ TRACE((" process L%d field %d (o%d)\n",
+ instr->id(),
+ FieldOf(l->access()),
+ l->object()->ActualValue()->id()));
+ HValue* result = load(l);
+ if (result != instr) {
+ // The load can be replaced with a previous load or a value.
+ TRACE((" replace L%d -> v%d\n", instr->id(), result->id()));
+ instr->DeleteAndReplaceWith(result);
+ }
+ break;
+ }
+ case HValue::kStoreNamedField: {
+ HStoreNamedField* s = HStoreNamedField::cast(instr);
+ TRACE((" process S%d field %d (o%d) = v%d\n",
+ instr->id(),
+ FieldOf(s->access()),
+ s->object()->ActualValue()->id(),
+ s->value()->id()));
+ HValue* result = store(s);
+ if (result == NULL) {
+ // The store is redundant. Remove it.
+ TRACE((" remove S%d\n", instr->id()));
+ instr->DeleteAndReplaceWith(NULL);
+ }
+ break;
+ }
+ default: {
+ if (instr->CheckGVNFlag(kChangesInobjectFields)) {
+ TRACE((" kill-all i%d\n", instr->id()));
+ Kill();
+ break;
+ }
+ if (instr->CheckGVNFlag(kChangesMaps)) {
+ TRACE((" kill-maps i%d\n", instr->id()));
+ KillOffset(JSObject::kMapOffset);
+ }
+ if (instr->CheckGVNFlag(kChangesElementsKind)) {
+ TRACE((" kill-elements-kind i%d\n", instr->id()));
+ KillOffset(JSObject::kMapOffset);
+ KillOffset(JSObject::kElementsOffset);
+ }
+ if (instr->CheckGVNFlag(kChangesElementsPointer)) {
+ TRACE((" kill-elements i%d\n", instr->id()));
+ KillOffset(JSObject::kElementsOffset);
+ }
+ if (instr->CheckGVNFlag(kChangesOsrEntries)) {
+ TRACE((" kill-osr i%d\n", instr->id()));
+ Kill();
+ }
+ }
+ // Improvements possible:
+ // - learn from HCheckMaps for field 0
+ // - remove unobservable stores (write-after-write)
+ // - track cells
+ // - track globals
+ // - track roots
+ }
+ return this;
+ }
+
+ // Support for global analysis with HFlowEngine: Copy state to successor block.
+ HLoadEliminationTable* Copy(HBasicBlock* succ, Zone* zone) {
+ HLoadEliminationTable* copy =
+ new(zone) HLoadEliminationTable(zone, aliasing_);
+ copy->EnsureFields(fields_.length());
+ for (int i = 0; i < fields_.length(); i++) {
+ copy->fields_[i] = fields_[i]->Copy(zone);
+ }
+ if (FLAG_trace_load_elimination) {
+ TRACE((" copy-to B%d\n", succ->block_id()));
+ copy->Print();
+ }
+ return copy;
+ }
+
+ // Support for global analysis with HFlowEngine: Merge this state with
+ // the other incoming state.
+ HLoadEliminationTable* Merge(HBasicBlock* succ,
+ HLoadEliminationTable* that, Zone* zone) {
+ if (that->fields_.length() < fields_.length()) {
+ // Drop fields not in the other table.
+ fields_.Rewind(that->fields_.length());
+ }
+ for (int i = 0; i < fields_.length(); i++) {
+ // Merge the field approximations for like fields.
+ HFieldApproximation* approx = fields_[i];
+ HFieldApproximation* prev = NULL;
+ while (approx != NULL) {
+ // TODO(titzer): Merging is O(N * M); sort?
+ HFieldApproximation* other = that->Find(approx->object_, i);
+ if (other == NULL || !Equal(approx->last_value_, other->last_value_)) {
+ // Kill an entry that doesn't agree with the other value.
+ if (prev != NULL) {
+ prev->next_ = approx->next_;
+ } else {
+ fields_[i] = approx->next_;
+ }
+ approx = approx->next_;
+ continue;
+ }
+ prev = approx;
+ approx = approx->next_;
+ }
+ }
+ return this;
+ }
+
+ friend class HLoadEliminationEffects; // Calls Kill() and others.
+ friend class HLoadEliminationPhase;
+
+ private:
+ // Process a load instruction, updating internal table state. If a previous
+ // load or store for this object and field exists, return the new value with
+ // which the load should be replaced. Otherwise, return {instr}.
+ HValue* load(HLoadNamedField* instr) {
+ int field = FieldOf(instr->access());
+ if (field < 0) return instr;
+
+ HValue* object = instr->object()->ActualValue();
+ HFieldApproximation* approx = FindOrCreate(object, field);
+
+ if (approx->last_value_ == NULL) {
+ // Load is not redundant. Fill out a new entry.
+ approx->last_load_ = instr;
+ approx->last_value_ = instr;
+ return instr;
+ } else {
+ // Eliminate the load. Reuse previously stored value or load instruction.
+ return approx->last_value_;
+ }
+ }
+
+ // Process a store instruction, updating internal table state. If a previous
+ // store to the same object and field makes this store redundant (e.g. because
+ // the stored values are the same), return NULL indicating that this store
+ // instruction is redundant. Otherwise, return {instr}.
+ HValue* store(HStoreNamedField* instr) {
+ int field = FieldOf(instr->access());
+ if (field < 0) return KillIfMisaligned(instr);
+
+ HValue* object = instr->object()->ActualValue();
+ HValue* value = instr->value();
+
+ // Kill non-equivalent may-alias entries.
+ KillFieldInternal(object, field, value);
+ if (instr->has_transition()) {
+ // A transition store alters the map of the object.
+ // TODO(titzer): remember the new map (a constant) for the object.
+ KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+ }
+ HFieldApproximation* approx = FindOrCreate(object, field);
+
+ if (Equal(approx->last_value_, value)) {
+ // The store is redundant because the field already has this value.
+ return NULL;
+ } else {
+ // The store is not redundant. Update the entry.
+ approx->last_load_ = NULL;
+ approx->last_value_ = value;
+ return instr;
+ }
+ }
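
Taken together, load() and store() transform straight-line code as in this hypothetical trace (instruction and value ids invented for illustration):

    // s1: o1.f0 = v5   -> FindOrCreate(o1, 0); entry records last_value_ = v5
    // l1: load o1.f0   -> table hit: l1 is deleted and replaced with v5
    // s2: o1.f0 = v5   -> Equal(last_value_, v5): store is redundant, removed
    // s3: o2.f0 = v6   -> KillFieldInternal() first kills may-alias f0 entries
    //                     that disagree with v6, then records {o2, f0} = v6
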
+
+ // Kill everything in this table.
+ void Kill() {
+ fields_.Rewind(0);
+ }
+
+ // Kill all entries matching the given offset.
+ void KillOffset(int offset) {
+ int field = FieldOf(offset);
+ if (field >= 0 && field < fields_.length()) {
+ fields_[field] = NULL;
+ }
+ }
+
+ // Kill all entries aliasing the given store.
+ void KillStore(HStoreNamedField* s) {
+ int field = FieldOf(s->access());
+ if (field >= 0) {
+ KillFieldInternal(s->object()->ActualValue(), field, s->value());
+ } else {
+ KillIfMisaligned(s);
+ }
+ }
+
+ // Kill multiple entries in the case of a misaligned store.
+ HValue* KillIfMisaligned(HStoreNamedField* instr) {
+ HObjectAccess access = instr->access();
+ if (access.IsInobject()) {
+ int offset = access.offset();
+ if ((offset % kPointerSize) != 0) {
+ // Kill the field containing the first word of the access.
+ HValue* object = instr->object()->ActualValue();
+ int field = offset / kPointerSize;
+ KillFieldInternal(object, field, NULL);
+
+ // Kill the next field in case of overlap.
+ int size = kPointerSize;
+ if (access.representation().IsByte()) size = 1;
+ else if (access.representation().IsInteger32()) size = 4;
+ int next_field = (offset + size - 1) / kPointerSize;
+ if (next_field != field) KillFieldInternal(object, next_field, NULL);
+ }
+ }
+ return instr;
+ }
+
+ // Find an entry for the given object and field pair.
+ HFieldApproximation* Find(HValue* object, int field) {
+ // Search for a field approximation for this object.
+ HFieldApproximation* approx = fields_[field];
+ while (approx != NULL) {
+ if (aliasing_->MustAlias(object, approx->object_)) return approx;
+ approx = approx->next_;
+ }
+ return NULL;
+ }
+
+ // Find or create an entry for the given object and field pair.
+ HFieldApproximation* FindOrCreate(HValue* object, int field) {
+ EnsureFields(field + 1);
+
+ // Search for a field approximation for this object.
+ HFieldApproximation* approx = fields_[field];
+ int count = 0;
+ while (approx != NULL) {
+ if (aliasing_->MustAlias(object, approx->object_)) return approx;
+ count++;
+ approx = approx->next_;
+ }
+
+ if (count >= kMaxTrackedObjects) {
+ // Pull the last entry off the end and repurpose it for this object.
+ approx = ReuseLastApproximation(field);
+ } else {
+ // Allocate a new entry.
+ approx = new(zone_) HFieldApproximation();
+ }
+
+ // Insert the entry at the head of the list.
+ approx->object_ = object;
+ approx->last_load_ = NULL;
+ approx->last_value_ = NULL;
+ approx->next_ = fields_[field];
+ fields_[field] = approx;
+
+ return approx;
+ }
+
+ // Kill all entries for a given field that _may_ alias the given object
+ // and do _not_ have the given value.
+ void KillFieldInternal(HValue* object, int field, HValue* value) {
+ if (field >= fields_.length()) return; // Nothing to do.
+
+ HFieldApproximation* approx = fields_[field];
+ HFieldApproximation* prev = NULL;
+ while (approx != NULL) {
+ if (aliasing_->MayAlias(object, approx->object_)) {
+ if (!Equal(approx->last_value_, value)) {
+ // Kill an aliasing entry that doesn't agree on the value.
+ if (prev != NULL) {
+ prev->next_ = approx->next_;
+ } else {
+ fields_[field] = approx->next_;
+ }
+ approx = approx->next_;
+ continue;
+ }
+ }
+ prev = approx;
+ approx = approx->next_;
+ }
+ }
+
+ bool Equal(HValue* a, HValue* b) {
+ if (a == b) return true;
+ if (a != NULL && b != NULL) return a->Equals(b);
+ return false;
+ }
+
+ // Remove the last approximation for a field so that it can be reused.
+ // We reuse the last entry because it was the first inserted and is thus
+ // farthest away from the current instruction.
+ HFieldApproximation* ReuseLastApproximation(int field) {
+ HFieldApproximation* approx = fields_[field];
+ ASSERT(approx != NULL);
+
+ HFieldApproximation* prev = NULL;
+ while (approx->next_ != NULL) {
+ prev = approx;
+ approx = approx->next_;
+ }
+ if (prev != NULL) prev->next_ = NULL;
+ return approx;
+ }
+
+ // Compute the field index for the given object access; -1 if not tracked.
+ int FieldOf(HObjectAccess access) {
+ return access.IsInobject() ? FieldOf(access.offset()) : -1;
+ }
+
+ // Compute the field index for the given in-object offset; -1 if not tracked.
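+  // E.g. with kPointerSize == 4, offset 8 maps to field 2, while offset 6
+  // (misaligned) and offsets at or beyond kMaxTrackedFields words yield -1.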
+ int FieldOf(int offset) {
+ if (offset >= kMaxTrackedFields * kPointerSize) return -1;
+ // TODO(titzer): track misaligned loads in a separate list?
+ if ((offset % kPointerSize) != 0) return -1; // Ignore misaligned accesses.
+ return offset / kPointerSize;
+ }
+
+ // Ensure internal storage for the given number of fields.
+ void EnsureFields(int num_fields) {
+ if (fields_.length() < num_fields) {
+ fields_.AddBlock(NULL, num_fields - fields_.length(), zone_);
+ }
+ }
+
+ // Print this table to stdout.
+ void Print() {
+ for (int i = 0; i < fields_.length(); i++) {
+ PrintF(" field %d: ", i);
+ for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) {
+ PrintF("[o%d =", a->object_->id());
+ if (a->last_load_ != NULL) PrintF(" L%d", a->last_load_->id());
+ if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id());
+ PrintF("] ");
+ }
+ PrintF("\n");
+ }
+ }
+
+ Zone* zone_;
+ ZoneList<HFieldApproximation*> fields_;
+ HAliasAnalyzer* aliasing_;
+};
+
+
+// Support for HFlowEngine: collect store effects within loops.
+class HLoadEliminationEffects : public ZoneObject {
+ public:
+ explicit HLoadEliminationEffects(Zone* zone)
+ : zone_(zone),
+ maps_stored_(false),
+ fields_stored_(false),
+ elements_stored_(false),
+ stores_(5, zone) { }
+
+ inline bool Disabled() {
+ return false; // Effects are _not_ disabled.
+ }
+
+ // Process a possibly side-effecting instruction.
+ void Process(HInstruction* instr, Zone* zone) {
+ switch (instr->opcode()) {
+ case HValue::kStoreNamedField: {
+ stores_.Add(HStoreNamedField::cast(instr), zone_);
+ break;
+ }
+ case HValue::kOsrEntry: {
+ // Kill everything. Loads must not be hoisted past the OSR entry.
+ maps_stored_ = true;
+ fields_stored_ = true;
+ elements_stored_ = true;
+ }
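+      // Fall through: an OSR entry also gets the generic effects below.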
+ default: {
+ fields_stored_ |= instr->CheckGVNFlag(kChangesInobjectFields);
+ maps_stored_ |= instr->CheckGVNFlag(kChangesMaps);
+ maps_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
+ elements_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
+ elements_stored_ |= instr->CheckGVNFlag(kChangesElementsPointer);
+ }
+ }
+ }
+
+ // Apply these effects to the given load elimination table.
+ void Apply(HLoadEliminationTable* table) {
+ if (fields_stored_) {
+ table->Kill();
+ return;
+ }
+ if (maps_stored_) {
+ table->KillOffset(JSObject::kMapOffset);
+ }
+ if (elements_stored_) {
+ table->KillOffset(JSObject::kElementsOffset);
+ }
+
+ // Kill non-agreeing fields for each store contained in these effects.
+ for (int i = 0; i < stores_.length(); i++) {
+ table->KillStore(stores_[i]);
+ }
+ }
+
+ // Union these effects with the other effects.
+ void Union(HLoadEliminationEffects* that, Zone* zone) {
+ maps_stored_ |= that->maps_stored_;
+ fields_stored_ |= that->fields_stored_;
+ elements_stored_ |= that->elements_stored_;
+ for (int i = 0; i < that->stores_.length(); i++) {
+ stores_.Add(that->stores_[i], zone);
+ }
+ }
+
+ private:
+ Zone* zone_;
+ bool maps_stored_ : 1;
+ bool fields_stored_ : 1;
+ bool elements_stored_ : 1;
+ ZoneList<HStoreNamedField*> stores_;
+};
+
+
+// The main routine of the analysis phase. Use the HFlowEngine for either a
+// local or a global analysis.
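+// The global analysis walks the blocks dominated by the entry block with one
+// shared table; the local analysis resets the table for every block.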
+void HLoadEliminationPhase::Run() {
+ HFlowEngine<HLoadEliminationTable, HLoadEliminationEffects>
+ engine(graph(), zone());
+ HAliasAnalyzer aliasing;
+ HLoadEliminationTable* table =
+ new(zone()) HLoadEliminationTable(zone(), &aliasing);
+
+ if (GLOBAL) {
+ // Perform a global analysis.
+ engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
+ } else {
+ // Perform only local analysis.
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ table->Kill();
+ engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
+ }
+ }
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-load-elimination.h b/deps/v8/src/hydrogen-load-elimination.h
new file mode 100644
index 0000000000..ef6f71fa11
--- /dev/null
+++ b/deps/v8/src/hydrogen-load-elimination.h
@@ -0,0 +1,50 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_LOAD_ELIMINATION_H_
+#define V8_HYDROGEN_LOAD_ELIMINATION_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+class HLoadEliminationPhase : public HPhase {
+ public:
+ explicit HLoadEliminationPhase(HGraph* graph)
+ : HPhase("H_Load elimination", graph) { }
+
+ void Run();
+
+ private:
+ void EliminateLoads(HBasicBlock* block);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_LOAD_ELIMINATION_H_
diff --git a/deps/v8/src/hydrogen-mark-unreachable.cc b/deps/v8/src/hydrogen-mark-unreachable.cc
new file mode 100644
index 0000000000..d7c5ed2b18
--- /dev/null
+++ b/deps/v8/src/hydrogen-mark-unreachable.cc
@@ -0,0 +1,77 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-mark-unreachable.h"
+
+namespace v8 {
+namespace internal {
+
+
+void HMarkUnreachableBlocksPhase::MarkUnreachableBlocks() {
+ // If there is unreachable code in the graph, propagate the unreachable marks
+ // using a fixed-point iteration.
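+  // All blocks start out reachable; each pass marks a block unreachable when
+  // none of its predecessors can transfer control to it, and the iteration
+  // stops as soon as a full pass changes nothing.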
+ bool changed = true;
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ while (changed) {
+ changed = false;
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* block = blocks->at(i);
+ if (!block->IsReachable()) continue;
+ bool is_reachable = blocks->at(0) == block;
+ for (HPredecessorIterator it(block); !it.Done(); it.Advance()) {
+ HBasicBlock* predecessor = it.Current();
+ // A block is reachable if one of its predecessors is reachable,
+ // doesn't deoptimize and either is known to transfer control to the
+ // block or has a control flow instruction for which the next block
+ // cannot be determined.
+ if (predecessor->IsReachable() && !predecessor->IsDeoptimizing()) {
+ HBasicBlock* pred_succ;
+ bool known_pred_succ =
+ predecessor->end()->KnownSuccessorBlock(&pred_succ);
+ if (!known_pred_succ || pred_succ == block) {
+ is_reachable = true;
+ break;
+ }
+ }
+ if (block->is_osr_entry()) {
+ is_reachable = true;
+ }
+ }
+ if (!is_reachable) {
+ block->MarkUnreachable();
+ changed = true;
+ }
+ }
+ }
+}
+
+
+void HMarkUnreachableBlocksPhase::Run() {
+ MarkUnreachableBlocks();
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-deoptimizing-mark.h b/deps/v8/src/hydrogen-mark-unreachable.h
index 7d6e6e4bda..e9459d5208 100644
--- a/deps/v8/src/hydrogen-deoptimizing-mark.h
+++ b/deps/v8/src/hydrogen-mark-unreachable.h
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_HYDROGEN_DEOPTIMIZING_MARK_H_
-#define V8_HYDROGEN_DEOPTIMIZING_MARK_H_
+#ifndef V8_HYDROGEN_MARK_UNREACHABLE_H_
+#define V8_HYDROGEN_MARK_UNREACHABLE_H_
#include "hydrogen.h"
@@ -34,23 +34,20 @@ namespace v8 {
namespace internal {
-// Mark all blocks that are dominated by an unconditional soft deoptimize to
-// prevent code motion across those blocks.
-class HPropagateDeoptimizingMarkPhase : public HPhase {
+class HMarkUnreachableBlocksPhase : public HPhase {
public:
- explicit HPropagateDeoptimizingMarkPhase(HGraph* graph)
- : HPhase("H_Propagate deoptimizing mark", graph) { }
+ explicit HMarkUnreachableBlocksPhase(HGraph* graph)
+ : HPhase("H_Mark unrechable blocks", graph) { }
void Run();
private:
- void MarkAsDeoptimizing();
- void NullifyUnreachableInstructions();
+ void MarkUnreachableBlocks();
- DISALLOW_COPY_AND_ASSIGN(HPropagateDeoptimizingMarkPhase);
+ DISALLOW_COPY_AND_ASSIGN(HMarkUnreachableBlocksPhase);
};
} } // namespace v8::internal
-#endif // V8_HYDROGEN_DEOPTIMIZING_MARK_H_
+#endif // V8_HYDROGEN_MARK_UNREACHABLE_H_
diff --git a/deps/v8/src/hydrogen-osr.cc b/deps/v8/src/hydrogen-osr.cc
index 6b1df1e7a5..6e39df6aa9 100644
--- a/deps/v8/src/hydrogen-osr.cc
+++ b/deps/v8/src/hydrogen-osr.cc
@@ -37,19 +37,8 @@ bool HOsrBuilder::HasOsrEntryAt(IterationStatement* statement) {
}
-// Build a new loop header block and set it as the current block.
-HBasicBlock *HOsrBuilder::BuildLoopEntry() {
- HBasicBlock* loop_entry = builder_->CreateLoopHeaderBlock();
- builder_->current_block()->Goto(loop_entry);
- builder_->set_current_block(loop_entry);
- return loop_entry;
-}
-
-
-HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
- IterationStatement* statement) {
- // Check if there is an OSR here first.
- if (!HasOsrEntryAt(statement)) return BuildLoopEntry();
+HBasicBlock* HOsrBuilder::BuildOsrLoopEntry(IterationStatement* statement) {
+ ASSERT(HasOsrEntryAt(statement));
Zone* zone = builder_->zone();
HGraph* graph = builder_->graph();
@@ -63,12 +52,12 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
osr_entry_ = graph->CreateBasicBlock();
HValue* true_value = graph->GetConstantTrue();
- HBranch* test = new(zone) HBranch(true_value, ToBooleanStub::Types(),
- non_osr_entry, osr_entry_);
- builder_->current_block()->Finish(test);
+ HBranch* test = builder_->New<HBranch>(true_value, ToBooleanStub::Types(),
+ non_osr_entry, osr_entry_);
+ builder_->FinishCurrentBlock(test);
HBasicBlock* loop_predecessor = graph->CreateBasicBlock();
- non_osr_entry->Goto(loop_predecessor);
+ builder_->Goto(non_osr_entry, loop_predecessor);
builder_->set_current_block(osr_entry_);
osr_entry_->set_osr_entry();
@@ -108,12 +97,12 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
builder_->Add<HOsrEntry>(osr_entry_id);
HContext* context = builder_->Add<HContext>();
environment->BindContext(context);
- builder_->current_block()->Goto(loop_predecessor);
+ builder_->Goto(loop_predecessor);
loop_predecessor->SetJoinId(statement->EntryId());
builder_->set_current_block(loop_predecessor);
// Create the final loop entry
- osr_loop_entry_ = BuildLoopEntry();
+ osr_loop_entry_ = builder_->BuildLoopEntry();
return osr_loop_entry_;
}
diff --git a/deps/v8/src/hydrogen-osr.h b/deps/v8/src/hydrogen-osr.h
index 5014a75bda..ae72ce650c 100644
--- a/deps/v8/src/hydrogen-osr.h
+++ b/deps/v8/src/hydrogen-osr.h
@@ -45,9 +45,10 @@ class HOsrBuilder : public ZoneObject {
osr_entry_(NULL),
osr_loop_entry_(NULL),
osr_values_(NULL) { }
+
// Creates the loop entry block for the given statement, setting up OSR
// entries as necessary, and sets the current block to the new block.
- HBasicBlock* BuildPossibleOsrLoopEntry(IterationStatement* statement);
+ HBasicBlock* BuildOsrLoopEntry(IterationStatement* statement);
// Process the hydrogen graph after it has been completed, performing
// any OSR-specific cleanups or changes.
@@ -61,10 +62,9 @@ class HOsrBuilder : public ZoneObject {
return unoptimized_frame_slots_;
}
- private:
- HBasicBlock* BuildLoopEntry();
bool HasOsrEntryAt(IterationStatement* statement);
+ private:
int unoptimized_frame_slots_;
HOptimizedGraphBuilder* builder_;
HBasicBlock* osr_entry_;
diff --git a/deps/v8/src/hydrogen-redundant-phi.cc b/deps/v8/src/hydrogen-redundant-phi.cc
index 9c38200577..1263833dac 100644
--- a/deps/v8/src/hydrogen-redundant-phi.cc
+++ b/deps/v8/src/hydrogen-redundant-phi.cc
@@ -31,37 +31,18 @@ namespace v8 {
namespace internal {
void HRedundantPhiEliminationPhase::Run() {
- // We do a simple fixed point iteration without any work list, because
- // machine-generated JavaScript can lead to a very dense Hydrogen graph with
- // an enormous work list and will consequently result in OOM. Experiments
- // showed that this simple algorithm is good enough, and even e.g. tracking
- // the set or range of blocks to consider is not a real improvement.
- bool need_another_iteration;
+ // Gather all phis from all blocks first.
const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
- ZoneList<HPhi*> redundant_phis(blocks->length(), zone());
- do {
- need_another_iteration = false;
- for (int i = 0; i < blocks->length(); ++i) {
- HBasicBlock* block = blocks->at(i);
- for (int j = 0; j < block->phis()->length(); j++) {
- HPhi* phi = block->phis()->at(j);
- HValue* replacement = phi->GetRedundantReplacement();
- if (replacement != NULL) {
- // Remember phi to avoid concurrent modification of the block's phis.
- redundant_phis.Add(phi, zone());
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* value = it.value();
- value->SetOperandAt(it.index(), replacement);
- need_another_iteration |= value->IsPhi();
- }
- }
- }
- for (int i = 0; i < redundant_phis.length(); i++) {
- block->RemovePhi(redundant_phis[i]);
- }
- redundant_phis.Clear();
+ ZoneList<HPhi*> all_phis(blocks->length(), zone());
+ for (int i = 0; i < blocks->length(); ++i) {
+ HBasicBlock* block = blocks->at(i);
+ for (int j = 0; j < block->phis()->length(); j++) {
+ all_phis.Add(block->phis()->at(j), zone());
}
- } while (need_another_iteration);
+ }
+
+ // Iteratively reduce all phis in the list.
+ ProcessPhis(&all_phis);
#if DEBUG
// Make sure that we *really* removed all redundant phis.
@@ -73,4 +54,35 @@ void HRedundantPhiEliminationPhase::Run() {
#endif
}
+
+void HRedundantPhiEliminationPhase::ProcessBlock(HBasicBlock* block) {
+ ProcessPhis(block->phis());
+}
+
+
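+// A phi is redundant if GetRedundantReplacement() finds a single value that
+// can stand in for it, e.g. a phi whose inputs are all the same value v
+// (or the phi itself) reduces to v.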
+void HRedundantPhiEliminationPhase::ProcessPhis(const ZoneList<HPhi*>* phis) {
+ bool updated;
+ do {
+    // Iteratively replace all redundant phis in the given list.
+ updated = false;
+ for (int i = 0; i < phis->length(); i++) {
+ HPhi* phi = phis->at(i);
+ if (phi->CheckFlag(HValue::kIsDead)) continue; // Already replaced.
+
+ HValue* replacement = phi->GetRedundantReplacement();
+ if (replacement != NULL) {
+ phi->SetFlag(HValue::kIsDead);
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ HValue* value = it.value();
+ value->SetOperandAt(it.index(), replacement);
+ // Iterate again if used in another non-dead phi.
+ updated |= value->IsPhi() && !value->CheckFlag(HValue::kIsDead);
+ }
+ phi->block()->RemovePhi(phi);
+ }
+ }
+ } while (updated);
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-redundant-phi.h b/deps/v8/src/hydrogen-redundant-phi.h
index 6291fa5b78..960ae69c95 100644
--- a/deps/v8/src/hydrogen-redundant-phi.h
+++ b/deps/v8/src/hydrogen-redundant-phi.h
@@ -42,8 +42,11 @@ class HRedundantPhiEliminationPhase : public HPhase {
: HPhase("H_Redundant phi elimination", graph) { }
void Run();
+ void ProcessBlock(HBasicBlock* block);
private:
+ void ProcessPhis(const ZoneList<HPhi*>* phis);
+
DISALLOW_COPY_AND_ASSIGN(HRedundantPhiEliminationPhase);
};
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc
index 960113782f..d0c9b58258 100644
--- a/deps/v8/src/hydrogen-representation-changes.cc
+++ b/deps/v8/src/hydrogen-representation-changes.cc
@@ -61,6 +61,11 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
if (new_value == NULL) {
new_value = new(graph()->zone()) HChange(
value, to, is_truncating_to_smi, is_truncating_to_int);
+ if (use_value->position() != RelocInfo::kNoPosition) {
+ new_value->set_position(use_value->position());
+ } else {
+ ASSERT(!FLAG_emit_opt_code_positions || !graph()->info()->IsOptimizing());
+ }
}
new_value->InsertBefore(next);
diff --git a/deps/v8/src/hydrogen-uint32-analysis.cc b/deps/v8/src/hydrogen-uint32-analysis.cc
index 835a198d4d..8de887d6f8 100644
--- a/deps/v8/src/hydrogen-uint32-analysis.cc
+++ b/deps/v8/src/hydrogen-uint32-analysis.cc
@@ -35,8 +35,17 @@ bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) {
// Operations that operate on bits are safe.
if (use->IsBitwise() || use->IsShl() || use->IsSar() || use->IsShr()) {
return true;
- } else if (use->IsChange() || use->IsSimulate()) {
- // Conversions and deoptimization have special support for unt32.
+ } else if (use->IsSimulate()) {
+ // Deoptimization has special support for uint32.
+ return true;
+ } else if (use->IsChange()) {
+ // Conversions have special support for uint32.
+ // This ASSERT guards that the conversion in question is actually
+ // implemented. Do not extend the whitelist without adding
+ // support to LChunkBuilder::DoChange().
+ ASSERT(HChange::cast(use)->to().IsDouble() ||
+ HChange::cast(use)->to().IsSmi() ||
+ HChange::cast(use)->to().IsTagged());
return true;
} else if (use->IsStoreKeyed()) {
HStoreKeyed* store = HStoreKeyed::cast(use);
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 15ef5ed0b6..3b232e6e93 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -30,21 +30,24 @@
#include <algorithm>
#include "v8.h"
+#include "allocation-site-scopes.h"
#include "codegen.h"
#include "full-codegen.h"
#include "hashmap.h"
#include "hydrogen-bce.h"
#include "hydrogen-bch.h"
#include "hydrogen-canonicalize.h"
+#include "hydrogen-check-elimination.h"
#include "hydrogen-dce.h"
#include "hydrogen-dehoist.h"
-#include "hydrogen-deoptimizing-mark.h"
#include "hydrogen-environment-liveness.h"
#include "hydrogen-escape-analysis.h"
#include "hydrogen-infer-representation.h"
#include "hydrogen-infer-types.h"
+#include "hydrogen-load-elimination.h"
#include "hydrogen-gvn.h"
#include "hydrogen-mark-deoptimize.h"
+#include "hydrogen-mark-unreachable.h"
#include "hydrogen-minus-zero.h"
#include "hydrogen-osr.h"
#include "hydrogen-range-analysis.h"
@@ -94,7 +97,7 @@ HBasicBlock::HBasicBlock(HGraph* graph)
parent_loop_header_(NULL),
inlined_entry_block_(NULL),
is_inline_return_target_(false),
- is_deoptimizing_(false),
+ is_reachable_(true),
dominates_loop_successors_(false),
is_osr_entry_(false) { }
@@ -104,6 +107,11 @@ Isolate* HBasicBlock::isolate() const {
}
+void HBasicBlock::MarkUnreachable() {
+ is_reachable_ = false;
+}
+
+
void HBasicBlock::AttachLoopInformation() {
ASSERT(!IsLoopHeader());
loop_information_ = new(zone()) HLoopInformation(this, zone());
@@ -132,16 +140,25 @@ void HBasicBlock::RemovePhi(HPhi* phi) {
}
-void HBasicBlock::AddInstruction(HInstruction* instr) {
+void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
ASSERT(!IsStartBlock() || !IsFinished());
ASSERT(!instr->IsLinked());
ASSERT(!IsFinished());
+ if (position != RelocInfo::kNoPosition) {
+ instr->set_position(position);
+ }
if (first_ == NULL) {
ASSERT(last_environment() != NULL);
ASSERT(!last_environment()->ast_id().IsNone());
HBlockEntry* entry = new(zone()) HBlockEntry();
entry->InitializeAsFirst(this);
+ if (position != RelocInfo::kNoPosition) {
+ entry->set_position(position);
+ } else {
+ ASSERT(!FLAG_emit_opt_code_positions ||
+ !graph()->info()->IsOptimizing());
+ }
first_ = last_ = entry;
}
instr->InsertAfter(last_);
@@ -192,9 +209,9 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
}
-void HBasicBlock::Finish(HControlInstruction* end) {
+void HBasicBlock::Finish(HControlInstruction* end, int position) {
ASSERT(!IsFinished());
- AddInstruction(end);
+ AddInstruction(end, position);
end_ = end;
for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
it.Current()->RegisterPredecessor(this);
@@ -203,35 +220,44 @@ void HBasicBlock::Finish(HControlInstruction* end) {
void HBasicBlock::Goto(HBasicBlock* block,
+ int position,
FunctionState* state,
bool add_simulate) {
bool drop_extra = state != NULL &&
state->inlining_kind() == DROP_EXTRA_ON_RETURN;
if (block->IsInlineReturnTarget()) {
- AddInstruction(new(zone()) HLeaveInlined());
+ HEnvironment* env = last_environment();
+ int argument_count = env->arguments_environment()->parameter_count();
+ AddInstruction(new(zone())
+ HLeaveInlined(state->entry(), argument_count),
+ position);
UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
}
- if (add_simulate) AddNewSimulate(BailoutId::None());
+ if (add_simulate) AddNewSimulate(BailoutId::None(), position);
HGoto* instr = new(zone()) HGoto(block);
- Finish(instr);
+ Finish(instr, position);
}
void HBasicBlock::AddLeaveInlined(HValue* return_value,
- FunctionState* state) {
+ FunctionState* state,
+ int position) {
HBasicBlock* target = state->function_return();
bool drop_extra = state->inlining_kind() == DROP_EXTRA_ON_RETURN;
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
- AddInstruction(new(zone()) HLeaveInlined());
+ HEnvironment* env = last_environment();
+ int argument_count = env->arguments_environment()->parameter_count();
+ AddInstruction(new(zone()) HLeaveInlined(state->entry(), argument_count),
+ position);
UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
last_environment()->Push(return_value);
- AddNewSimulate(BailoutId::None());
+ AddNewSimulate(BailoutId::None(), position);
HGoto* instr = new(zone()) HGoto(target);
- Finish(instr);
+ Finish(instr, position);
}
@@ -622,10 +648,21 @@ HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
// Can't pass GetInvalidContext() to HConstant::New, because that will
// recursively call GetConstant
HConstant* constant = HConstant::New(zone(), NULL, value);
- constant->InsertAfter(GetConstantUndefined());
+ constant->InsertAfter(entry_block()->first());
pointer->set(constant);
+ return constant;
}
- return pointer->get();
+ return ReinsertConstantIfNecessary(pointer->get());
+}
+
+
+HConstant* HGraph::ReinsertConstantIfNecessary(HConstant* constant) {
+ if (!constant->IsLinked()) {
+ // The constant was removed from the graph. Reinsert.
+ constant->ClearFlag(HValue::kIsDead);
+ constant->InsertAfter(entry_block()->first());
+ }
+ return constant;
}
@@ -648,21 +685,21 @@ HConstant* HGraph::GetConstantMinus1() {
HConstant* HGraph::GetConstant##Name() { \
if (!constant_##name##_.is_set()) { \
HConstant* constant = new(zone()) HConstant( \
- isolate()->factory()->name##_value(), \
- UniqueValueId::name##_value(isolate()->heap()), \
+ Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
Representation::Tagged(), \
htype, \
false, \
true, \
false, \
boolean_value); \
- constant->InsertAfter(GetConstantUndefined()); \
+ constant->InsertAfter(entry_block()->first()); \
constant_##name##_.set(constant); \
} \
- return constant_##name##_.get(); \
+ return ReinsertConstantIfNecessary(constant_##name##_.get()); \
}
+DEFINE_GET_CONSTANT(Undefined, undefined, HType::Tagged(), false)
DEFINE_GET_CONSTANT(True, true, HType::Boolean(), true)
DEFINE_GET_CONSTANT(False, false, HType::Boolean(), false)
DEFINE_GET_CONSTANT(Hole, the_hole, HType::Tagged(), false)
@@ -690,9 +727,8 @@ bool HGraph::IsStandardConstant(HConstant* constant) {
}
-HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, int position)
+HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder)
: builder_(builder),
- position_(position),
finished_(false),
deopt_then_(false),
deopt_else_(false),
@@ -715,7 +751,6 @@ HGraphBuilder::IfBuilder::IfBuilder(
HGraphBuilder* builder,
HIfContinuation* continuation)
: builder_(builder),
- position_(RelocInfo::kNoPosition),
finished_(false),
deopt_then_(false),
deopt_else_(false),
@@ -726,16 +761,17 @@ HGraphBuilder::IfBuilder::IfBuilder(
captured_(false),
needs_compare_(false),
first_true_block_(NULL),
+ last_true_block_(NULL),
first_false_block_(NULL),
split_edge_merge_block_(NULL),
merge_block_(NULL) {
continuation->Continue(&first_true_block_,
- &first_false_block_,
- &position_);
+ &first_false_block_);
}
-void HGraphBuilder::IfBuilder::AddCompare(HControlInstruction* compare) {
+HControlInstruction* HGraphBuilder::IfBuilder::AddCompare(
+ HControlInstruction* compare) {
if (split_edge_merge_block_ != NULL) {
HEnvironment* env = first_false_block_->last_environment();
HBasicBlock* split_edge =
@@ -747,24 +783,26 @@ void HGraphBuilder::IfBuilder::AddCompare(HControlInstruction* compare) {
compare->SetSuccessorAt(0, first_true_block_);
compare->SetSuccessorAt(1, split_edge);
}
- split_edge->GotoNoSimulate(split_edge_merge_block_);
+ builder_->GotoNoSimulate(split_edge, split_edge_merge_block_);
} else {
compare->SetSuccessorAt(0, first_true_block_);
compare->SetSuccessorAt(1, first_false_block_);
}
- builder_->current_block()->Finish(compare);
+ builder_->FinishCurrentBlock(compare);
needs_compare_ = false;
+ return compare;
}
void HGraphBuilder::IfBuilder::Or() {
+ ASSERT(!needs_compare_);
ASSERT(!did_and_);
did_or_ = true;
HEnvironment* env = first_false_block_->last_environment();
if (split_edge_merge_block_ == NULL) {
split_edge_merge_block_ =
builder_->CreateBasicBlock(env->Copy());
- first_true_block_->GotoNoSimulate(split_edge_merge_block_);
+ builder_->GotoNoSimulate(first_true_block_, split_edge_merge_block_);
first_true_block_ = split_edge_merge_block_;
}
builder_->set_current_block(first_false_block_);
@@ -773,12 +811,13 @@ void HGraphBuilder::IfBuilder::Or() {
void HGraphBuilder::IfBuilder::And() {
+ ASSERT(!needs_compare_);
ASSERT(!did_or_);
did_and_ = true;
HEnvironment* env = first_false_block_->last_environment();
if (split_edge_merge_block_ == NULL) {
split_edge_merge_block_ = builder_->CreateBasicBlock(env->Copy());
- first_false_block_->GotoNoSimulate(split_edge_merge_block_);
+ builder_->GotoNoSimulate(first_false_block_, split_edge_merge_block_);
first_false_block_ = split_edge_merge_block_;
}
builder_->set_current_block(first_true_block_);
@@ -796,7 +835,29 @@ void HGraphBuilder::IfBuilder::CaptureContinuation(
HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL)
? builder_->current_block()
: first_false_block_;
- continuation->Capture(true_block, false_block, position_);
+ continuation->Capture(true_block, false_block);
+ captured_ = true;
+ End();
+}
+
+
+void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) {
+ ASSERT(!finished_);
+ ASSERT(!captured_);
+ HBasicBlock* true_block = last_true_block_ == NULL
+ ? first_true_block_
+ : last_true_block_;
+ HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL)
+ ? builder_->current_block()
+ : first_false_block_;
+ if (true_block != NULL && !true_block->IsFinished()) {
+ ASSERT(continuation->IsTrueReachable());
+ builder_->GotoNoSimulate(true_block, continuation->true_branch());
+ }
+ if (false_block != NULL && !false_block->IsFinished()) {
+ ASSERT(continuation->IsFalseReachable());
+ builder_->GotoNoSimulate(false_block, continuation->false_branch());
+ }
captured_ = true;
End();
}
@@ -814,10 +875,9 @@ void HGraphBuilder::IfBuilder::Then() {
HConstant* constant_false = builder_->graph()->GetConstantFalse();
ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
boolean_type.Add(ToBooleanStub::BOOLEAN);
- HBranch* branch =
- new(zone()) HBranch(constant_false, boolean_type, first_true_block_,
- first_false_block_);
- builder_->current_block()->Finish(branch);
+ HBranch* branch = builder()->New<HBranch>(
+ constant_false, boolean_type, first_true_block_, first_false_block_);
+ builder_->FinishCurrentBlock(branch);
}
builder_->set_current_block(first_true_block_);
}
@@ -845,10 +905,9 @@ void HGraphBuilder::IfBuilder::Deopt(const char* reason) {
void HGraphBuilder::IfBuilder::Return(HValue* value) {
- HBasicBlock* block = builder_->current_block();
HValue* parameter_count = builder_->graph()->GetConstantMinus1();
- block->FinishExit(builder_->New<HReturn>(value, parameter_count));
- builder_->set_current_block(NULL);
+ builder_->FinishExitCurrentBlock(
+ builder_->New<HReturn>(value, parameter_count));
if (did_else_) {
first_false_block_ = NULL;
} else {
@@ -878,17 +937,17 @@ void HGraphBuilder::IfBuilder::End() {
HBasicBlock* last_false_block = builder_->current_block();
ASSERT(!last_false_block->IsFinished());
if (deopt_then_) {
- last_false_block->GotoNoSimulate(merge_block_);
+ builder_->GotoNoSimulate(last_false_block, merge_block_);
builder_->PadEnvironmentForContinuation(last_true_block_,
merge_block_);
- last_true_block_->GotoNoSimulate(merge_block_);
+ builder_->GotoNoSimulate(last_true_block_, merge_block_);
} else {
- last_true_block_->GotoNoSimulate(merge_block_);
+ builder_->GotoNoSimulate(last_true_block_, merge_block_);
if (deopt_else_) {
builder_->PadEnvironmentForContinuation(last_false_block,
merge_block_);
}
- last_false_block->GotoNoSimulate(merge_block_);
+ builder_->GotoNoSimulate(last_false_block, merge_block_);
}
builder_->set_current_block(merge_block_);
}
@@ -936,7 +995,7 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
phi_ = header_block_->AddNewPhi(env->values()->length());
phi_->AddInput(initial);
env->Push(initial);
- builder_->current_block()->GotoNoSimulate(header_block_);
+ builder_->GotoNoSimulate(header_block_);
HEnvironment* body_env = env->Copy();
HEnvironment* exit_env = env->Copy();
@@ -948,11 +1007,8 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
builder_->set_current_block(header_block_);
env->Pop();
- HCompareNumericAndBranch* compare =
- new(zone()) HCompareNumericAndBranch(phi_, terminating, token);
- compare->SetSuccessorAt(0, body_block_);
- compare->SetSuccessorAt(1, exit_block_);
- builder_->current_block()->Finish(compare);
+ builder_->FinishCurrentBlock(builder_->New<HCompareNumericAndBranch>(
+ phi_, terminating, token, body_block_, exit_block_));
builder_->set_current_block(body_block_);
if (direction_ == kPreIncrement || direction_ == kPreDecrement) {
@@ -976,10 +1032,10 @@ void HGraphBuilder::LoopBuilder::Break() {
     // It's the first time we saw a break.
HEnvironment* env = exit_block_->last_environment()->Copy();
exit_trampoline_block_ = builder_->CreateBasicBlock(env);
- exit_block_->GotoNoSimulate(exit_trampoline_block_);
+ builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_);
}
- builder_->current_block()->GotoNoSimulate(exit_trampoline_block_);
+ builder_->GotoNoSimulate(exit_trampoline_block_);
}
@@ -999,7 +1055,7 @@ void HGraphBuilder::LoopBuilder::EndBody() {
// Push the new increment value on the expression stack to merge into the phi.
builder_->environment()->Push(increment_);
HBasicBlock* last_block = builder_->current_block();
- last_block->GotoNoSimulate(header_block_);
+ builder_->GotoNoSimulate(last_block, header_block_);
header_block_->loop_information()->RegisterBackEdge(last_block);
if (exit_trampoline_block_ != NULL) {
@@ -1017,14 +1073,16 @@ HGraph* HGraphBuilder::CreateGraph() {
CompilationPhase phase("H_Block building", info_);
set_current_block(graph()->entry_block());
if (!BuildGraph()) return NULL;
- graph()->FinalizeUniqueValueIds();
+ graph()->FinalizeUniqueness();
return graph_;
}
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
- current_block()->AddInstruction(instr);
+ ASSERT(!FLAG_emit_opt_code_positions ||
+ position_ != RelocInfo::kNoPosition || !info_->IsOptimizing());
+ current_block()->AddInstruction(instr, position_);
if (graph()->IsInsideNoSideEffectsScope()) {
instr->SetFlag(HValue::kHasNoObservableSideEffects);
}
@@ -1032,8 +1090,27 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
}
-void HGraphBuilder::AddIncrementCounter(StatsCounter* counter,
- HValue* context) {
+void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
+ ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
+ position_ != RelocInfo::kNoPosition);
+ current_block()->Finish(last, position_);
+ if (last->IsReturn() || last->IsAbnormalExit()) {
+ set_current_block(NULL);
+ }
+}
+
+
+void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
+ ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
+ position_ != RelocInfo::kNoPosition);
+ current_block()->FinishExit(instruction, position_);
+ if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
+ set_current_block(NULL);
+ }
+}
+
+
+void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) {
if (FLAG_native_code_counters && counter->Enabled()) {
HValue* reference = Add<HConstant>(ExternalReference(counter));
HValue* old_value = Add<HLoadNamedField>(reference,
@@ -1081,9 +1158,9 @@ void HGraphBuilder::FinishExitWithHardDeoptimization(
PadEnvironmentForContinuation(current_block(), continuation);
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
if (graph()->IsInsideNoSideEffectsScope()) {
- current_block()->GotoNoSimulate(continuation);
+ GotoNoSimulate(continuation);
} else {
- current_block()->Goto(continuation);
+ Goto(continuation);
}
}
@@ -1128,7 +1205,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
HValue* length,
HValue* key,
bool is_js_array) {
- Zone* zone = this->zone();
IfBuilder length_checker(this);
Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
@@ -1144,8 +1220,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
Token::GTE);
capacity_checker.Then();
- HValue* context = environment()->context();
-
HValue* max_gap = Add<HConstant>(static_cast<int32_t>(JSObject::kMaxGap));
HValue* max_capacity = Add<HAdd>(current_capacity, max_gap);
IfBuilder key_checker(this);
@@ -1166,8 +1240,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
capacity_checker.End();
if (is_js_array) {
- HValue* new_length = AddInstruction(
- HAdd::New(zone, context, key, graph_->GetConstant1()));
+ HValue* new_length = AddUncasted<HAdd>(key, graph_->GetConstant1());
new_length->ClearFlag(HValue::kCanOverflow);
Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(kind),
@@ -1252,6 +1325,135 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
}
+HValue* HGraphBuilder::BuildNumberToString(HValue* object,
+ Handle<Type> type) {
+ NoObservableSideEffectsScope scope(this);
+
+ // Create a joinable continuation.
+ HIfContinuation found(graph()->CreateBasicBlock(),
+ graph()->CreateBasicBlock());
+
+ // Load the number string cache.
+ HValue* number_string_cache =
+ Add<HLoadRoot>(Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ HValue* mask = AddLoadFixedArrayLength(number_string_cache);
+ mask->set_type(HType::Smi());
+ mask = Add<HSar>(mask, graph()->GetConstant1());
+ mask = Add<HSub>(mask, graph()->GetConstant1());
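+  // E.g. a cache of length 128 holds 64 (number, string) pairs, yielding
+  // mask 63; the hashes computed below are thus modulo the entry count.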
+
+ // Check whether object is a smi.
+ IfBuilder if_objectissmi(this);
+ if_objectissmi.If<HIsSmiAndBranch>(object);
+ if_objectissmi.Then();
+ {
+ // Compute hash for smi similar to smi_get_hash().
+ HValue* hash = Add<HBitwise>(Token::BIT_AND, object, mask);
+
+ // Load the key.
+ HValue* key_index = Add<HShl>(hash, graph()->GetConstant1());
+ HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+
+ // Check if object == key.
+ IfBuilder if_objectiskey(this);
+ if_objectiskey.If<HCompareObjectEqAndBranch>(object, key);
+ if_objectiskey.Then();
+ {
+ // Make the key_index available.
+ Push(key_index);
+ }
+ if_objectiskey.JoinContinuation(&found);
+ }
+ if_objectissmi.Else();
+ {
+ if (type->Is(Type::Smi())) {
+ if_objectissmi.Deopt("Excepted smi");
+ } else {
+ // Check if the object is a heap number.
+ IfBuilder if_objectisnumber(this);
+ if_objectisnumber.If<HCompareMap>(
+ object, isolate()->factory()->heap_number_map());
+ if_objectisnumber.Then();
+ {
+ // Compute hash for heap number similar to double_get_hash().
+ HValue* low = Add<HLoadNamedField>(
+ object, HObjectAccess::ForHeapNumberValueLowestBits());
+ HValue* high = Add<HLoadNamedField>(
+ object, HObjectAccess::ForHeapNumberValueHighestBits());
+ HValue* hash = Add<HBitwise>(Token::BIT_XOR, low, high);
+ hash = Add<HBitwise>(Token::BIT_AND, hash, mask);
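+        // Illustrative: for 1.0 (bit pattern 0x3ff0000000000000) this XORs
+        // the low word 0x00000000 with the high word 0x3ff00000 before
+        // masking.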
+
+ // Load the key.
+ HValue* key_index = Add<HShl>(hash, graph()->GetConstant1());
+ HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+
+ // Check if key is a heap number (the number string cache contains only
+      // SMIs and heap numbers, so it is sufficient to do a SMI check here).
+ IfBuilder if_keyisnotsmi(this);
+ if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key);
+ if_keyisnotsmi.Then();
+ {
+ // Check if values of key and object match.
+ IfBuilder if_keyeqobject(this);
+ if_keyeqobject.If<HCompareNumericAndBranch>(
+ Add<HLoadNamedField>(key, HObjectAccess::ForHeapNumberValue()),
+ Add<HLoadNamedField>(object, HObjectAccess::ForHeapNumberValue()),
+ Token::EQ);
+ if_keyeqobject.Then();
+ {
+ // Make the key_index available.
+ Push(key_index);
+ }
+ if_keyeqobject.JoinContinuation(&found);
+ }
+ if_keyisnotsmi.JoinContinuation(&found);
+ }
+ if_objectisnumber.Else();
+ {
+ if (type->Is(Type::Number())) {
+ if_objectisnumber.Deopt("Expected heap number");
+ }
+ }
+ if_objectisnumber.JoinContinuation(&found);
+ }
+ }
+ if_objectissmi.JoinContinuation(&found);
+
+ // Check for cache hit.
+ IfBuilder if_found(this, &found);
+ if_found.Then();
+ {
+ // Count number to string operation in native code.
+ AddIncrementCounter(isolate()->counters()->number_to_string_native());
+
+ // Load the value in case of cache hit.
+ HValue* key_index = Pop();
+ HValue* value_index = Add<HAdd>(key_index, graph()->GetConstant1());
+ Push(Add<HLoadKeyed>(number_string_cache, value_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE));
+ }
+ if_found.Else();
+ {
+ // Cache miss, fallback to runtime.
+ Add<HPushArgument>(object);
+ Push(Add<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
+ 1));
+ }
+ if_found.End();
+
+ return Pop();
+}
+
+
HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* checked_object,
HValue* key,
@@ -1303,7 +1505,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* bounds_check = negative_checker.If<HCompareNumericAndBranch>(
key, graph()->GetConstant0(), Token::GTE);
negative_checker.Then();
- HInstruction* result = AddExternalArrayElementAccess(
+ HInstruction* result = AddElementAccess(
external_elements, key, val, bounds_check, elements_kind, is_store);
negative_checker.ElseDeopt("Negative key encountered");
length_checker.End();
@@ -1313,7 +1515,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
checked_key = Add<HBoundsCheck>(key, length);
HLoadExternalArrayPointer* external_elements =
Add<HLoadExternalArrayPointer>(elements);
- return AddExternalArrayElementAccess(
+ return AddElementAccess(
external_elements, checked_key, val,
checked_object, elements_kind, is_store);
}
@@ -1346,14 +1548,13 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
elements_kind, length);
} else {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
- elements, isolate()->factory()->fixed_array_map(),
- top_info());
+ elements, isolate()->factory()->fixed_array_map(), top_info());
check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
}
}
}
- return AddFastElementAccess(elements, checked_key, val, checked_object,
- elements_kind, is_store, load_mode, store_mode);
+ return AddElementAccess(elements, checked_key, val, checked_object,
+ elements_kind, is_store, load_mode);
}
@@ -1443,85 +1644,31 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
}
-HInstruction* HGraphBuilder::AddExternalArrayElementAccess(
- HValue* external_elements,
+HInstruction* HGraphBuilder::AddElementAccess(
+ HValue* elements,
HValue* checked_key,
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- bool is_store) {
+ bool is_store,
+ LoadKeyedHoleMode load_mode) {
if (is_store) {
ASSERT(val != NULL);
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- val = Add<HClampToUint8>(val);
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- break;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
+ if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
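+      // Stores to pixel arrays clamp the stored value into the 0..255 range.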
+ val = Add<HClampToUint8>(val);
}
- return Add<HStoreKeyed>(external_elements, checked_key, val, elements_kind);
- } else {
- ASSERT(val == NULL);
- HLoadKeyed* load = Add<HLoadKeyed>(external_elements,
- checked_key,
- dependency,
- elements_kind);
- if (FLAG_opt_safe_uint32_operations &&
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- graph()->RecordUint32Instruction(load);
- }
- return load;
+ return Add<HStoreKeyed>(elements, checked_key, val, elements_kind);
}
-}
-
-HInstruction* HGraphBuilder::AddFastElementAccess(
- HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* load_dependency,
- ElementsKind elements_kind,
- bool is_store,
- LoadKeyedHoleMode load_mode,
- KeyedAccessStoreMode store_mode) {
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return Add<HStoreKeyed>(elements, checked_key, val, elements_kind);
- default:
- UNREACHABLE();
- return NULL;
- }
+ ASSERT(!is_store);
+ ASSERT(val == NULL);
+ HLoadKeyed* load = Add<HLoadKeyed>(
+ elements, checked_key, dependency, elements_kind, load_mode);
+ if (FLAG_opt_safe_uint32_operations &&
+ elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ graph()->RecordUint32Instruction(load);
}
- // It's an element load (!is_store).
- return Add<HLoadKeyed>(
- elements, checked_key, load_dependency, elements_kind, load_mode);
+ return load;
}
@@ -1771,9 +1918,8 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
void HGraphBuilder::BuildCompareNil(
HValue* value,
Handle<Type> type,
- int position,
HIfContinuation* continuation) {
- IfBuilder if_nil(this, position);
+ IfBuilder if_nil(this);
bool some_case_handled = false;
bool some_case_missing = false;
@@ -1824,12 +1970,11 @@ void HGraphBuilder::BuildCompareNil(
HValue* HGraphBuilder::BuildCreateAllocationMemento(HValue* previous_object,
int previous_object_size,
HValue* alloc_site) {
- // TODO(mvstanton): ASSERT altered to CHECK to diagnose chromium bug 284577
- CHECK(alloc_site != NULL);
+ ASSERT(alloc_site != NULL);
HInnerAllocatedObject* alloc_memento = Add<HInnerAllocatedObject>(
previous_object, previous_object_size);
- Handle<Map> alloc_memento_map(
- isolate()->heap()->allocation_memento_map());
+ Handle<Map> alloc_memento_map =
+ isolate()->factory()->allocation_memento_map();
AddStoreMapConstant(alloc_memento, alloc_memento_map);
HObjectAccess access = HObjectAccess::ForAllocationMementoSite();
Add<HStoreNamedField>(alloc_memento, access, alloc_site);
@@ -1886,8 +2031,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
// No need for a context lookup if the kind_ matches the initial
// map, because we can just load the map in that case.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddInstruction(
- builder()->BuildLoadNamedField(constructor_function_, access));
+ return builder()->AddLoadNamedField(constructor_function_, access);
}
HInstruction* native_context = builder()->BuildGetNativeContext();
@@ -1907,8 +2051,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddInstruction(
- builder()->BuildLoadNamedField(constructor_function_, access));
+ return builder()->AddLoadNamedField(constructor_function_, access);
}
@@ -1983,6 +2126,11 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
HAllocate* new_object = builder()->Add<HAllocate>(size_in_bytes,
HType::JSArray(), NOT_TENURED, JS_ARRAY_TYPE);
+ // Folded array allocation should be aligned if it has fast double elements.
+ if (IsFastDoubleElementsKind(kind_)) {
+ new_object->MakeDoubleAligned();
+ }
+
// Fill in the fields: map, properties, length
HValue* map;
if (allocation_site_payload_ == NULL) {
@@ -2042,6 +2190,9 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// to know it's the initial state.
function_state_= &initial_function_state_;
InitializeAstVisitor(info->isolate());
+ if (FLAG_emit_opt_code_positions) {
+ SetSourcePosition(info->shared_info()->start_position());
+ }
}
@@ -2054,8 +2205,8 @@ HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
return first;
} else {
HBasicBlock* join_block = graph()->CreateBasicBlock();
- first->Goto(join_block);
- second->Goto(join_block);
+ Goto(first, join_block);
+ Goto(second, join_block);
join_block->SetJoinId(join_id);
return join_block;
}
@@ -2066,7 +2217,7 @@ HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
HBasicBlock* exit_block,
HBasicBlock* continue_block) {
if (continue_block != NULL) {
- if (exit_block != NULL) exit_block->Goto(continue_block);
+ if (exit_block != NULL) Goto(exit_block, continue_block);
continue_block->SetJoinId(statement->ContinueId());
return continue_block;
}
@@ -2079,10 +2230,10 @@ HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
HBasicBlock* body_exit,
HBasicBlock* loop_successor,
HBasicBlock* break_block) {
- if (body_exit != NULL) body_exit->Goto(loop_entry);
+ if (body_exit != NULL) Goto(body_exit, loop_entry);
loop_entry->PostProcessLoopHeader(statement);
if (break_block != NULL) {
- if (loop_successor != NULL) loop_successor->Goto(break_block);
+ if (loop_successor != NULL) Goto(loop_successor, break_block);
break_block->SetJoinId(statement->ExitId());
return break_block;
}
@@ -2090,8 +2241,26 @@ HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
}
-void HBasicBlock::FinishExit(HControlInstruction* instruction) {
- Finish(instruction);
+// Build a new loop header block and set it as the current block.
+HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry() {
+ HBasicBlock* loop_entry = CreateLoopHeaderBlock();
+ Goto(loop_entry);
+ set_current_block(loop_entry);
+ return loop_entry;
+}
+
+
+HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
+ IterationStatement* statement) {
+ HBasicBlock* loop_entry = osr()->HasOsrEntryAt(statement)
+ ? osr()->BuildOsrLoopEntry(statement)
+ : BuildLoopEntry();
+ return loop_entry;
+}
+
+
+void HBasicBlock::FinishExit(HControlInstruction* instruction, int position) {
+ Finish(instruction, position);
ClearEnvironment();
}
@@ -2109,7 +2278,6 @@ HGraph::HGraph(CompilationInfo* info)
zone_(info->zone()),
is_recursive_(false),
use_optimistic_licm_(false),
- has_soft_deoptimize_(false),
depends_on_empty_array_proto_elements_(false),
type_change_checksum_(0),
maximum_environment_size_(0),
@@ -2137,12 +2305,12 @@ HBasicBlock* HGraph::CreateBasicBlock() {
}
-void HGraph::FinalizeUniqueValueIds() {
+void HGraph::FinalizeUniqueness() {
DisallowHeapAllocation no_gc;
ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
- it.Current()->FinalizeUniqueValueId();
+ it.Current()->FinalizeUniqueness();
}
}
}
@@ -2640,7 +2808,7 @@ void EffectContext::ReturnControl(HControlInstruction* instr,
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
- owner()->current_block()->Finish(instr);
+ owner()->FinishCurrentBlock(instr);
HBasicBlock* join = owner()->CreateJoin(empty_true, empty_false, ast_id);
owner()->set_current_block(join);
}
@@ -2650,7 +2818,7 @@ void EffectContext::ReturnContinuation(HIfContinuation* continuation,
BailoutId ast_id) {
HBasicBlock* true_branch = NULL;
HBasicBlock* false_branch = NULL;
- continuation->Continue(&true_branch, &false_branch, NULL);
+ continuation->Continue(&true_branch, &false_branch);
if (!continuation->IsTrueReachable()) {
owner()->set_current_block(false_branch);
} else if (!continuation->IsFalseReachable()) {
@@ -2684,7 +2852,7 @@ void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, materialize_true);
instr->SetSuccessorAt(1, materialize_false);
- owner()->current_block()->Finish(instr);
+ owner()->FinishCurrentBlock(instr);
owner()->set_current_block(materialize_true);
owner()->Push(owner()->graph()->GetConstantTrue());
owner()->set_current_block(materialize_false);
@@ -2699,7 +2867,7 @@ void ValueContext::ReturnContinuation(HIfContinuation* continuation,
BailoutId ast_id) {
HBasicBlock* materialize_true = NULL;
HBasicBlock* materialize_false = NULL;
- continuation->Continue(&materialize_true, &materialize_false, NULL);
+ continuation->Continue(&materialize_true, &materialize_false);
if (continuation->IsTrueReachable()) {
owner()->set_current_block(materialize_true);
owner()->Push(owner()->graph()->GetConstantTrue());
@@ -2739,9 +2907,9 @@ void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
- owner()->current_block()->Finish(instr);
- empty_true->Goto(if_true(), owner()->function_state());
- empty_false->Goto(if_false(), owner()->function_state());
+ owner()->FinishCurrentBlock(instr);
+ owner()->Goto(empty_true, if_true(), owner()->function_state());
+ owner()->Goto(empty_false, if_false(), owner()->function_state());
owner()->set_current_block(NULL);
}
@@ -2750,12 +2918,12 @@ void TestContext::ReturnContinuation(HIfContinuation* continuation,
BailoutId ast_id) {
HBasicBlock* true_branch = NULL;
HBasicBlock* false_branch = NULL;
- continuation->Continue(&true_branch, &false_branch, NULL);
+ continuation->Continue(&true_branch, &false_branch);
if (continuation->IsTrueReachable()) {
- true_branch->Goto(if_true(), owner()->function_state());
+ owner()->Goto(true_branch, if_true(), owner()->function_state());
}
if (continuation->IsFalseReachable()) {
- false_branch->Goto(if_false(), owner()->function_state());
+ owner()->Goto(false_branch, if_false(), owner()->function_state());
}
owner()->set_current_block(NULL);
}
@@ -2773,11 +2941,11 @@ void TestContext::BuildBranch(HValue* value) {
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
ToBooleanStub::Types expected(condition()->to_boolean_types());
- HBranch* test = new(zone()) HBranch(value, expected, empty_true, empty_false);
- builder->current_block()->Finish(test);
+ builder->FinishCurrentBlock(builder->New<HBranch>(
+ value, expected, empty_true, empty_false));
- empty_true->Goto(if_true(), builder->function_state());
- empty_false->Goto(if_false(), builder->function_state());
+ owner()->Goto(empty_true, if_true(), builder->function_state());
+ owner()->Goto(empty_false, if_false(), builder->function_state());
builder->set_current_block(NULL);
}
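
The hunk above is representative of a refactor that runs through this whole file: direct zone allocation (new(zone()) HFoo(context, ...)) and block-level calls (current_block()->Finish(...), block->Goto(...)) move behind builder helpers (New<T>, FinishCurrentBlock, Goto), so context and source-position plumbing happens in one place. Below is a minimal sketch of the pattern; Builder, Instr, and Branch are simplified stand-ins, not the real V8 classes.

#include <cstdio>
#include <memory>
#include <utility>
#include <vector>

// Simplified stand-ins for the V8 types; only the shape of the
// pattern matches the real builder.
struct Instr {
  virtual ~Instr() = default;
  int position = -1;  // source position, stamped by the builder
};

struct Branch : Instr {
  explicit Branch(int v) : value(v) {}
  int value;
};

class Builder {
 public:
  // New<T> forwards its arguments to T's constructor and stamps
  // builder-wide state (here: the current source position) onto the
  // result, so call sites no longer pass a context or call
  // set_position() by hand.
  template <class T, class... Args>
  T* New(Args&&... args) {
    auto instr = std::make_unique<T>(std::forward<Args>(args)...);
    instr->position = current_position_;
    T* raw = instr.get();
    owned_.push_back(std::move(instr));
    return raw;
  }

  // FinishCurrentBlock(instr) replaces current_block()->Finish(instr):
  // the builder ends its own active block instead of reaching through it.
  void FinishCurrentBlock(Instr* instr) {
    std::printf("block finished by instr at position %d\n", instr->position);
  }

  void set_current_position(int pos) { current_position_ = pos; }

 private:
  int current_position_ = 0;
  std::vector<std::unique_ptr<Instr>> owned_;
};

int main() {
  Builder builder;
  builder.set_current_position(42);
  // Call-site shape after the refactor: one expression, no explicit
  // context or position plumbing.
  builder.FinishCurrentBlock(builder.New<Branch>(7));
}
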
@@ -2894,7 +3062,7 @@ bool HOptimizedGraphBuilder::BuildGraph() {
// not replayed by the Lithium translation.
HEnvironment* initial_env = environment()->CopyWithoutHistory();
HBasicBlock* body_entry = CreateBasicBlock(initial_env);
- current_block()->Goto(body_entry);
+ Goto(body_entry);
body_entry->SetJoinId(BailoutId::FunctionEntry());
set_current_block(body_entry);
@@ -2906,8 +3074,7 @@ bool HOptimizedGraphBuilder::BuildGraph() {
VisitDeclarations(scope->declarations());
Add<HSimulate>(BailoutId::Declarations());
- HValue* context = environment()->context();
- Add<HStackCheck>(context, HStackCheck::kFunctionEntry);
+ Add<HStackCheck>(HStackCheck::kFunctionEntry);
VisitStatements(current_info()->function()->body());
if (HasStackOverflow()) return false;
@@ -2932,7 +3099,7 @@ bool HOptimizedGraphBuilder::BuildGraph() {
type_info->set_inlined_type_change_checksum(composite_checksum);
// Perform any necessary OSR-specific cleanups or changes to the graph.
- osr_->FinishGraph();
+ osr()->FinishGraph();
return true;
}
@@ -2957,7 +3124,6 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
Run<HEnvironmentLivenessAnalysisPhase>();
}
- Run<HPropagateDeoptimizingMarkPhase>();
if (!CheckConstPhiUses()) {
*bailout_reason = kUnsupportedPhiUseOfConstVariable;
return false;
@@ -2968,11 +3134,16 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
return false;
}
- // Remove dead code and phis
- if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
+ // Find and mark unreachable code to simplify optimizations, especially GVN,
+ // where unreachable code could unnecessarily defeat LICM.
+ Run<HMarkUnreachableBlocksPhase>();
+ if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
+ if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
+ if (FLAG_load_elimination) Run<HLoadEliminationPhase>();
+
CollectPhis();
if (has_osr()) osr()->FinishOsrValues();
@@ -3006,17 +3177,17 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
// Eliminate redundant stack checks on backwards branches.
Run<HStackCheckEliminationPhase>();
- if (FLAG_array_bounds_checks_elimination) {
- Run<HBoundsCheckEliminationPhase>();
- }
- if (FLAG_array_bounds_checks_hoisting) {
- Run<HBoundsCheckHoistingPhase>();
- }
+ if (FLAG_array_bounds_checks_elimination) Run<HBoundsCheckEliminationPhase>();
+ if (FLAG_array_bounds_checks_hoisting) Run<HBoundsCheckHoistingPhase>();
if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
RestoreActualValues();
+ // Find unreachable code a second time; GVN and other optimizations may
+ // have made blocks unreachable that were previously reachable.
+ Run<HMarkUnreachableBlocksPhase>();
+
return true;
}
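
HMarkUnreachableBlocksPhase is new in this pipeline and deliberately runs twice: once early so dead blocks cannot defeat GVN and LICM, and once at the end because GVN and the eliminators can themselves cut the last edge into a block. A minimal sketch of what such a phase computes, on a toy adjacency-list CFG (the types here are illustrative, not the real HGraph API):

#include <cstdio>
#include <vector>

// Simplified CFG: block i lists the indices of its successors.
// Block 0 is the entry block.
using Graph = std::vector<std::vector<int>>;

// Worklist reachability: everything not visited from the entry gets
// marked unreachable, mirroring what a mark-unreachable-blocks phase
// hands to later phases.
std::vector<bool> MarkReachable(const Graph& g) {
  std::vector<bool> reachable(g.size(), false);
  std::vector<int> worklist = {0};
  while (!worklist.empty()) {
    int b = worklist.back();
    worklist.pop_back();
    if (reachable[b]) continue;
    reachable[b] = true;
    for (int succ : g[b]) worklist.push_back(succ);
  }
  return reachable;
}

int main() {
  // 0 -> 1 -> 3; block 2 has lost its last predecessor (e.g. an
  // optimization removed the branch that targeted it).
  Graph g = {{1}, {3}, {3}, {}};
  std::vector<bool> reachable = MarkReachable(g);
  for (size_t i = 0; i < g.size(); ++i) {
    std::printf("block %zu: %s\n", i,
                reachable[i] ? "reachable" : "unreachable");
  }
}
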
@@ -3049,12 +3220,6 @@ void HGraph::RestoreActualValues() {
}
-void HGraphBuilder::PushAndAdd(HInstruction* instr) {
- Push(instr);
- AddInstruction(instr);
-}
-
-
template <class Instruction>
HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
int count = call->argument_count();
@@ -3075,10 +3240,6 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
HInstruction* context = Add<HContext>();
environment()->BindContext(context);
- HConstant* undefined_constant = HConstant::cast(Add<HConstant>(
- isolate()->factory()->undefined_value()));
- graph()->set_undefined_constant(undefined_constant);
-
// Create an arguments object containing the initial parameters. Set the
// initial values of parameters including "this" having parameter index 0.
ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
@@ -3092,6 +3253,7 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
AddInstruction(arguments_object);
graph()->SetArgumentsObject(arguments_object);
+ HConstant* undefined_constant = graph()->GetConstantUndefined();
// Initialize specials and locals to undefined.
for (int i = environment()->parameter_count() + 1;
i < environment()->length();
@@ -3134,7 +3296,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
}
HBasicBlock* break_block = break_info.break_block();
if (break_block != NULL) {
- if (current_block() != NULL) current_block()->Goto(break_block);
+ if (current_block() != NULL) Goto(break_block);
break_block->SetJoinId(stmt->ExitId());
set_current_block(break_block);
}
@@ -3244,7 +3406,7 @@ void HOptimizedGraphBuilder::VisitContinueStatement(
HBasicBlock* continue_block = break_scope()->Get(
stmt->target(), BreakAndContinueScope::CONTINUE, &drop_extra);
Drop(drop_extra);
- current_block()->Goto(continue_block);
+ Goto(continue_block);
set_current_block(NULL);
}
@@ -3257,7 +3419,7 @@ void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
HBasicBlock* break_block = break_scope()->Get(
stmt->target(), BreakAndContinueScope::BREAK, &drop_extra);
Drop(drop_extra);
- current_block()->Goto(break_block);
+ Goto(break_block);
set_current_block(NULL);
}
@@ -3280,26 +3442,26 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
if (context->IsTest()) {
TestContext* test = TestContext::cast(context);
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(test->if_true(), state);
+ Goto(test->if_true(), state);
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* return_value = Pop();
HValue* receiver = environment()->arguments_environment()->Lookup(0);
HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(return_value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
+ New<HHasInstanceTypeAndBranch>(return_value,
+ FIRST_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE);
HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_spec_object);
typecheck->SetSuccessorAt(1, not_spec_object);
- current_block()->Finish(typecheck);
- if_spec_object->AddLeaveInlined(return_value, state);
- not_spec_object->AddLeaveInlined(receiver, state);
+ FinishCurrentBlock(typecheck);
+ AddLeaveInlined(if_spec_object, return_value, state);
+ AddLeaveInlined(not_spec_object, receiver, state);
}
} else if (state->inlining_kind() == SETTER_CALL_RETURN) {
// Return from an inlined setter call. The returned value is never used, the
@@ -3309,11 +3471,11 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
HValue* rhs = environment()->arguments_environment()->Lookup(1);
context->ReturnValue(rhs);
} else if (context->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
HValue* rhs = environment()->arguments_environment()->Lookup(1);
- current_block()->AddLeaveInlined(rhs, state);
+ AddLeaveInlined(rhs, state);
}
} else {
// Return from a normal inlined function. Visit the subexpression in the
@@ -3323,11 +3485,11 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
VisitForControl(stmt->expression(), test->if_true(), test->if_false());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
- current_block()->AddLeaveInlined(Pop(), state);
+ AddLeaveInlined(Pop(), state);
}
}
set_current_block(NULL);
@@ -3361,8 +3523,6 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
return Bailout(kSwitchStatementMixedOrNonLiteralSwitchLabels);
}
- HValue* context = environment()->context();
-
CHECK_ALIVE(VisitForValue(stmt->tag()));
Add<HSimulate>(stmt->EntryId());
HValue* tag_value = Pop();
@@ -3373,13 +3533,11 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// Test switch's tag value if all clauses are string literals
if (stmt->switch_type() == SwitchStatement::STRING_SWITCH) {
- string_check = new(zone()) HIsStringAndBranch(tag_value);
first_test_block = graph()->CreateBasicBlock();
not_string_block = graph()->CreateBasicBlock();
-
- string_check->SetSuccessorAt(0, first_test_block);
- string_check->SetSuccessorAt(1, not_string_block);
- current_block()->Finish(string_check);
+ string_check = New<HIsStringAndBranch>(
+ tag_value, first_test_block, not_string_block);
+ FinishCurrentBlock(string_check);
set_current_block(first_test_block);
}
@@ -3408,21 +3566,21 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
HCompareNumericAndBranch* compare_ =
- new(zone()) HCompareNumericAndBranch(tag_value,
- label_value,
- Token::EQ_STRICT);
+ New<HCompareNumericAndBranch>(tag_value,
+ label_value,
+ Token::EQ_STRICT);
compare_->set_observed_input_representation(
Representation::Smi(), Representation::Smi());
compare = compare_;
} else {
- compare = new(zone()) HStringCompareAndBranch(context, tag_value,
- label_value,
- Token::EQ_STRICT);
+ compare = New<HStringCompareAndBranch>(tag_value,
+ label_value,
+ Token::EQ_STRICT);
}
compare->SetSuccessorAt(0, body_block);
compare->SetSuccessorAt(1, next_test_block);
- current_block()->Finish(compare);
+ FinishCurrentBlock(compare);
set_current_block(next_test_block);
}
@@ -3455,6 +3613,13 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
last_block = NULL; // Cleared to indicate we've handled it.
}
} else {
+ // If the current test block is deoptimizing due to an unhandled clause
+ // of the switch, the test instruction is in the next block since the
+ // deopt must end the current block.
+ if (curr_test_block->IsDeoptimizing()) {
+ ASSERT(curr_test_block->end()->SecondSuccessor() == NULL);
+ curr_test_block = curr_test_block->end()->FirstSuccessor();
+ }
normal_block = curr_test_block->end()->FirstSuccessor();
curr_test_block = curr_test_block->end()->SecondSuccessor();
}
@@ -3496,8 +3661,8 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
last_block,
stmt->ExitId()));
} else {
- if (fall_through_block != NULL) fall_through_block->Goto(break_block);
- if (last_block != NULL) last_block->Goto(break_block);
+ if (fall_through_block != NULL) Goto(fall_through_block, break_block);
+ if (last_block != NULL) Goto(last_block, break_block);
break_block->SetJoinId(stmt->ExitId());
set_current_block(break_block);
}
@@ -3509,9 +3674,8 @@ void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
BreakAndContinueInfo* break_info) {
BreakAndContinueScope push(break_info, this);
Add<HSimulate>(stmt->StackCheckId());
- HValue* context = environment()->context();
- HStackCheck* stack_check = HStackCheck::cast(Add<HStackCheck>(
- context, HStackCheck::kBackwardsBranch));
+ HStackCheck* stack_check =
+ HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch));
ASSERT(loop_entry->IsLoopHeader());
loop_entry->loop_information()->set_stack_check(stack_check);
CHECK_BAILOUT(Visit(stmt->body()));
@@ -3523,7 +3687,7 @@ void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
BreakAndContinueInfo break_info(stmt);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
@@ -3562,7 +3726,7 @@ void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
// If the condition is constant true, do not generate a branch.
HBasicBlock* loop_successor = NULL;
@@ -3604,7 +3768,7 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
CHECK_ALIVE(Visit(stmt->init()));
}
ASSERT(current_block() != NULL);
- HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
HBasicBlock* loop_successor = NULL;
if (stmt->cond() != NULL) {
@@ -3687,14 +3851,14 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
HForInCacheArray::cast(array)->set_index_cache(
HForInCacheArray::cast(index_cache));
- HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
HValue* index = environment()->ExpressionStackAt(0);
HValue* limit = environment()->ExpressionStackAt(1);
// Check that we still have more keys.
HCompareNumericAndBranch* compare_index =
- new(zone()) HCompareNumericAndBranch(index, limit, Token::LT);
+ New<HCompareNumericAndBranch>(index, limit, Token::LT);
compare_index->set_observed_input_representation(
Representation::Smi(), Representation::Smi());
@@ -3703,7 +3867,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
compare_index->SetSuccessorAt(0, loop_body);
compare_index->SetSuccessorAt(1, loop_successor);
- current_block()->Finish(compare_index);
+ FinishCurrentBlock(compare_index);
set_current_block(loop_successor);
Drop(5);
@@ -3733,9 +3897,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
set_current_block(body_exit);
HValue* current_index = Pop();
- HInstruction* new_index = New<HAdd>(current_index,
- graph()->GetConstant1());
- PushAndAdd(new_index);
+ Push(Add<HAdd>(current_index, graph()->GetConstant1()));
body_exit = current_block();
}
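
This hunk shows the call-site shape that made PushAndAdd (removed above) redundant: Add<T> already returns the instruction it inserted, so Push(Add<HAdd>(...)) composes the two primitives directly. A toy sketch of that contract (Builder here is a stand-in, not the real class):

#include <cstdio>
#include <vector>

// Simplified builder: Add inserts an instruction and returns it, so
// Push(Add(...)) composes without a fused PushAndAdd helper.
class Builder {
 public:
  int Add(int value) {  // stand-in for Add<HAdd>(...)
    instructions_.push_back(value);
    return value;       // returning the instruction enables chaining
  }
  void Push(int value) { stack_.push_back(value); }
  int Top() const { return stack_.back(); }

 private:
  std::vector<int> instructions_;
  std::vector<int> stack_;
};

int main() {
  Builder b;
  b.Push(b.Add(1 + 41));  // Push(Add<HAdd>(current_index, constant 1))
  std::printf("top of environment stack: %d\n", b.Top());
}
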
@@ -3782,6 +3944,11 @@ void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
+void HOptimizedGraphBuilder::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
Code* unoptimized_code, FunctionLiteral* expr) {
int start_position = expr->start_position();
@@ -3812,19 +3979,18 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
}
// We also have a stack overflow if the recursive compilation did.
if (HasStackOverflow()) return;
- HValue* context = environment()->context();
HFunctionLiteral* instr =
- new(zone()) HFunctionLiteral(context, shared_info, expr->pretenure());
+ New<HFunctionLiteral>(shared_info, expr->pretenure());
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
+void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout(kSharedFunctionInfoLiteral);
+ return Bailout(kNativeFunctionLiteral);
}
@@ -3938,19 +4104,15 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnInstruction(constant, expr->id());
} else {
HLoadGlobalCell* instr =
- new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
+ New<HLoadGlobalCell>(cell, lookup.GetPropertyDetails());
return ast_context()->ReturnInstruction(instr, expr->id());
}
} else {
- HValue* context = environment()->context();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
+ HGlobalObject* global_object = Add<HGlobalObject>();
HLoadGlobalGeneric* instr =
- new(zone()) HLoadGlobalGeneric(context,
- global_object,
- variable->name(),
- ast_context()->is_for_typeof());
- instr->set_position(expr->position());
+ New<HLoadGlobalGeneric>(global_object,
+ variable->name(),
+ ast_context()->is_for_typeof());
return ast_context()->ReturnInstruction(instr, expr->id());
}
}
@@ -3993,13 +4155,10 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
ASSERT(current_block()->HasPredecessor());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
Handle<FixedArray> literals(closure->literals());
- HValue* context = environment()->context();
-
- HRegExpLiteral* instr = new(zone()) HRegExpLiteral(context,
- literals,
- expr->pattern(),
- expr->flags(),
- expr->literal_index());
+ HRegExpLiteral* instr = New<HRegExpLiteral>(literals,
+ expr->pattern(),
+ expr->flags(),
+ expr->literal_index());
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -4064,20 +4223,6 @@ static bool LookupAccessorPair(Handle<Map> map,
}
-static bool LookupGetter(Handle<Map> map,
- Handle<String> name,
- Handle<JSFunction>* getter,
- Handle<JSObject>* holder) {
- Handle<AccessorPair> accessors;
- if (LookupAccessorPair(map, name, &accessors, holder) &&
- accessors->getter()->IsJSFunction()) {
- *getter = Handle<JSFunction>(JSFunction::cast(accessors->getter()));
- return true;
- }
- return false;
-}
-
-
static bool LookupSetter(Handle<Map> map,
Handle<String> name,
Handle<JSFunction>* setter,
@@ -4085,7 +4230,11 @@ static bool LookupSetter(Handle<Map> map,
Handle<AccessorPair> accessors;
if (LookupAccessorPair(map, name, &accessors, holder) &&
accessors->setter()->IsJSFunction()) {
- *setter = Handle<JSFunction>(JSFunction::cast(accessors->setter()));
+ Handle<JSFunction> func(JSFunction::cast(accessors->setter()));
+ CallOptimization call_optimization(func);
+ // TODO(dcarney): temporary hack until crankshaft can handle api calls.
+ if (call_optimization.is_simple_api_call()) return false;
+ *setter = func;
return true;
}
return false;
@@ -4100,7 +4249,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
int* max_properties) {
if (boilerplate->map()->is_deprecated()) {
Handle<Object> result = JSObject::TryMigrateInstance(boilerplate);
- if (result->IsSmi()) return false;
+ if (result.is_null()) return false;
}
ASSERT(max_depth >= 0 && *max_properties >= 0);
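
The one-line change above is a behavior fix, not a rename: JSObject::TryMigrateInstance reports failure with a null handle, so probing the result with IsSmi() tested the wrong sentinel. A small sketch of the null-handle convention, with a simplified stand-in Handle type:

#include <cstdio>

// Simplified stand-in for a GC handle: is_null() plays the same role
// as Handle<T>::is_null() in the real code.
template <class T>
struct Handle {
  Handle() : location(nullptr) {}
  explicit Handle(T* loc) : location(loc) {}
  bool is_null() const { return location == nullptr; }
  T* location;
};

struct Object { bool migrated = false; };

// Failure is reported as a null handle, not as a sentinel value the
// caller must know to probe with IsSmi().
Handle<Object> TryMigrate(bool can_migrate, Object* storage) {
  if (!can_migrate) return Handle<Object>();  // null handle == failure
  storage->migrated = true;
  return Handle<Object>(storage);
}

int main() {
  Object obj;
  Handle<Object> ok = TryMigrate(true, &obj);
  Handle<Object> failed = TryMigrate(false, &obj);
  // Correct check, as in the fixed hunk: bail out on a null handle.
  std::printf("ok: %d, failed: %d\n", !ok.is_null(), !failed.is_null());
}
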
@@ -4166,18 +4315,23 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Check whether to use fast or slow deep-copying for boilerplate.
int max_properties = kMaxFastLiteralProperties;
- Handle<Object> boilerplate(closure->literals()->get(
- expr->literal_index()), isolate());
- if (boilerplate->IsJSObject() &&
- IsFastLiteral(Handle<JSObject>::cast(boilerplate),
- kMaxFastLiteralDepth,
- &max_properties)) {
- Handle<JSObject> boilerplate_object =
- Handle<JSObject>::cast(boilerplate);
+ Handle<Object> literals_cell(closure->literals()->get(expr->literal_index()),
+ isolate());
+ Handle<AllocationSite> site;
+ Handle<JSObject> boilerplate;
+ if (!literals_cell->IsUndefined()) {
+ // Retrieve the boilerplate
+ site = Handle<AllocationSite>::cast(literals_cell);
+ boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
+ isolate());
+ }
- literal = BuildFastLiteral(boilerplate_object,
- Handle<Object>::null(),
- DONT_TRACK_ALLOCATION_SITE);
+ if (!boilerplate.is_null() &&
+ IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
+ AllocationSiteUsageContext usage_context(isolate(), site, false);
+ usage_context.EnterNewScope();
+ literal = BuildFastLiteral(boilerplate, &usage_context);
+ usage_context.ExitScope(site, boilerplate);
} else {
NoObservableSideEffectsScope no_effects(this);
Handle<FixedArray> closure_literals(closure->literals(), isolate());
@@ -4193,9 +4347,10 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Add<HPushArgument>(Add<HConstant>(constant_properties));
Add<HPushArgument>(Add<HConstant>(flags));
- Runtime::FunctionId function_id =
- (expr->depth() > 1 || expr->may_store_doubles())
- ? Runtime::kCreateObjectLiteral : Runtime::kCreateObjectLiteralShallow;
+ // TODO(mvstanton): Add a flag to turn off creation of any
+ // AllocationMementos for this call: we are in crankshaft and should have
+ // learned enough about transition behavior to stop emitting mementos.
+ Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
4);
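
Fast object literals are now copied under an AllocationSiteUsageContext whose EnterNewScope/ExitScope calls bracket the walk over the boilerplate, so nested literals can each consult their own allocation site. A toy sketch of that enter/exit protocol around a recursive copy (SiteContext is an illustrative stand-in for the real context classes):

#include <cstdio>
#include <string>

// Simplified stand-in for AllocationSiteUsageContext: the walk enters
// a scope per (site, object) pair before copying and exits it after,
// so nesting depth tracks the literal structure.
class SiteContext {
 public:
  void EnterNewScope() { ++depth_; }
  void ExitScope(const std::string& site, const std::string& object) {
    std::printf("%*scopied %s using site %s\n", depth_ * 2, "",
                object.c_str(), site.c_str());
    --depth_;
  }

 private:
  int depth_ = 0;
};

// Shape of the BuildFastLiteral call site after the refactor.
void CopyLiteral(SiteContext* context, const std::string& site,
                 const std::string& object, int nested) {
  context->EnterNewScope();
  if (nested > 0) {
    CopyLiteral(context, site + ".inner", object + ".elem", nested - 1);
  }
  context->ExitScope(site, object);
}

int main() {
  SiteContext context;
  CopyLiteral(&context, "site0", "boilerplate", 2);
}
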
@@ -4285,51 +4440,50 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
bool uninitialized = false;
Handle<Object> literals_cell(literals->get(expr->literal_index()),
isolate());
- Handle<Object> raw_boilerplate;
+ Handle<JSObject> boilerplate_object;
if (literals_cell->IsUndefined()) {
uninitialized = true;
- raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
+ Handle<Object> raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
isolate(), literals, expr->constant_elements());
if (raw_boilerplate.is_null()) {
return Bailout(kArrayBoilerplateCreationFailed);
}
- site = isolate()->factory()->NewAllocationSite();
- site->set_transition_info(*raw_boilerplate);
+ boilerplate_object = Handle<JSObject>::cast(raw_boilerplate);
+ AllocationSiteCreationContext creation_context(isolate());
+ site = creation_context.EnterNewScope();
+ if (JSObject::DeepWalk(boilerplate_object, &creation_context).is_null()) {
+ return Bailout(kArrayBoilerplateCreationFailed);
+ }
+ creation_context.ExitScope(site, boilerplate_object);
literals->set(expr->literal_index(), *site);
- if (JSObject::cast(*raw_boilerplate)->elements()->map() ==
+ if (boilerplate_object->elements()->map() ==
isolate()->heap()->fixed_cow_array_map()) {
isolate()->counters()->cow_arrays_created_runtime()->Increment();
}
} else {
ASSERT(literals_cell->IsAllocationSite());
site = Handle<AllocationSite>::cast(literals_cell);
- raw_boilerplate = Handle<Object>(site->transition_info(), isolate());
+ boilerplate_object = Handle<JSObject>(
+ JSObject::cast(site->transition_info()), isolate());
}
- ASSERT(!raw_boilerplate.is_null());
- ASSERT(site->IsLiteralSite());
+ ASSERT(!boilerplate_object.is_null());
+ ASSERT(site->SitePointsToLiteral());
- Handle<JSObject> boilerplate_object =
- Handle<JSObject>::cast(raw_boilerplate);
ElementsKind boilerplate_elements_kind =
- Handle<JSObject>::cast(boilerplate_object)->GetElementsKind();
-
- // TODO(mvstanton): This heuristic is only a temporary solution. In the
- // end, we want to quit creating allocation site info after a certain number
- // of GCs for a call site.
- AllocationSiteMode mode = AllocationSite::GetMode(
- boilerplate_elements_kind);
+ boilerplate_object->GetElementsKind();
// Check whether to use fast or slow deep-copying for boilerplate.
int max_properties = kMaxFastLiteralProperties;
if (IsFastLiteral(boilerplate_object,
kMaxFastLiteralDepth,
&max_properties)) {
- literal = BuildFastLiteral(boilerplate_object,
- site,
- mode);
+ AllocationSiteUsageContext usage_context(isolate(), site, false);
+ usage_context.EnterNewScope();
+ literal = BuildFastLiteral(boilerplate_object, &usage_context);
+ usage_context.ExitScope(site, boilerplate_object);
} else {
NoObservableSideEffectsScope no_effects(this);
// Boilerplate already exists and constant elements are never accessed,
@@ -4341,6 +4495,9 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Add<HPushArgument>(Add<HConstant>(literal_index));
Add<HPushArgument>(Add<HConstant>(constants));
+ // TODO(mvstanton): Consider a flag to turn off creation of any
+ // AllocationMementos for this call: we are in crankshaft and should have
+ // learned enough about transition behavior to stop emitting mementos.
Runtime::FunctionId function_id = (expr->depth() > 1)
? Runtime::kCreateArrayLiteral : Runtime::kCreateArrayLiteralShallow;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
@@ -4399,31 +4556,6 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
}
-// Sets the lookup result and returns true if the load/store can be inlined.
-static bool ComputeLoadStoreField(Handle<Map> type,
- Handle<String> name,
- LookupResult* lookup,
- bool is_store) {
- ASSERT(!is_store || !type->is_observed());
- if (!CanInlinePropertyAccess(*type)) {
- lookup->NotFound();
- return false;
- }
- // If we directly find a field, the access can be inlined.
- type->LookupDescriptor(NULL, *name, lookup);
- if (lookup->IsField()) return true;
-
- // For a load, we are out of luck if there is no such field.
- if (!is_store) return false;
-
- // 2nd chance: A store into a non-existent field can still be inlined if we
- // have a matching transition and some room left in the object.
- type->LookupTransition(NULL, *name, lookup);
- return lookup->IsTransitionToField(*type) &&
- (type->unused_property_fields() > 0);
-}
-
-
HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
Handle<Map> map) {
BuildCheckHeapObject(object);
@@ -4519,9 +4651,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
HValue* object,
Handle<String> name,
HValue* value) {
- HValue* context = environment()->context();
- return new(zone()) HStoreNamedGeneric(
- context,
+ return New<HStoreNamedGeneric>(
object,
name,
value,
@@ -4529,6 +4659,28 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
}
+// Sets the lookup result and returns true if the load/store can be inlined.
+static bool ComputeStoreField(Handle<Map> type,
+ Handle<String> name,
+ LookupResult* lookup,
+ bool lookup_transition = true) {
+ ASSERT(!type->is_observed());
+ if (!CanInlinePropertyAccess(*type)) {
+ lookup->NotFound();
+ return false;
+ }
+ // If we directly find a field, the access can be inlined.
+ type->LookupDescriptor(NULL, *name, lookup);
+ if (lookup->IsField()) return true;
+
+ if (!lookup_transition) return false;
+
+ type->LookupTransition(NULL, *name, lookup);
+ return lookup->IsTransitionToField(*type) &&
+ (type->unused_property_fields() > 0);
+}
+
+
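
ComputeStoreField narrows the old combined helper to stores only: a store is inlinable when the map already holds the field, or, if lookup_transition is set, when a transition to a new field exists and the object still has unused property slots. A toy sketch of that two-step decision (TypeMap stands in for the real Map/LookupResult machinery):

#include <cstdio>
#include <map>
#include <string>

// Toy map layout: existing fields plus possible transitions, with a
// budget of unused in-object slots.
struct TypeMap {
  std::map<std::string, int> fields;       // name -> field index
  std::map<std::string, int> transitions;  // name -> index after transition
  int unused_property_fields;
};

// Mirrors ComputeStoreField: direct field hit first, then (optionally)
// a transition-to-field with room left in the object.
bool CanInlineStore(const TypeMap& type, const std::string& name,
                    bool lookup_transition = true) {
  if (type.fields.count(name)) return true;  // store to existing field
  if (!lookup_transition) return false;      // polymorphic callers pass false
  return type.transitions.count(name) > 0 &&
         type.unused_property_fields > 0;    // transition needs a free slot
}

int main() {
  TypeMap type{{{"x", 0}}, {{"y", 1}}, /*unused_property_fields=*/1};
  std::printf("store x: %d\n", CanInlineStore(type, "x"));
  std::printf("store y: %d\n", CanInlineStore(type, "y"));
  std::printf("store y (no transitions): %d\n",
              CanInlineStore(type, "y", false));
  std::printf("store z: %d\n", CanInlineStore(type, "z"));
}
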
HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
HValue* object,
Handle<String> name,
@@ -4536,7 +4688,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
Handle<Map> map) {
// Handle a store to a known field.
LookupResult lookup(isolate());
- if (ComputeLoadStoreField(map, name, &lookup, true)) {
+ if (ComputeStoreField(map, name, &lookup)) {
HCheckMaps* checked_object = AddCheckMap(object, map);
return BuildStoreNamedField(checked_object, name, value, map, &lookup);
}
@@ -4546,140 +4698,192 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
}
-static bool CanLoadPropertyFromPrototype(Handle<Map> map,
- Handle<Name> name,
- LookupResult* lookup) {
- if (!CanInlinePropertyAccess(*map)) return false;
- map->LookupDescriptor(NULL, *name, lookup);
- if (lookup->IsFound()) return false;
+bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad(
+ PropertyAccessInfo* info) {
+ if (!CanInlinePropertyAccess(*map_)) return false;
+
+ if (!LookupDescriptor()) return false;
+
+ if (!lookup_.IsFound()) {
+ return (!info->lookup_.IsFound() || info->has_holder()) &&
+ map_->prototype() == info->map_->prototype();
+ }
+
+ // Mismatch if the other access info found the property in the prototype
+ // chain.
+ if (info->has_holder()) return false;
+
+ if (lookup_.IsPropertyCallbacks()) {
+ return accessor_.is_identical_to(info->accessor_);
+ }
+
+ if (lookup_.IsConstant()) {
+ return constant_.is_identical_to(info->constant_);
+ }
+
+ ASSERT(lookup_.IsField());
+ if (!info->lookup_.IsField()) return false;
+
+ Representation r = access_.representation();
+ if (!info->access_.representation().IsCompatibleForLoad(r)) return false;
+ if (info->access_.offset() != access_.offset()) return false;
+ if (info->access_.IsInobject() != access_.IsInobject()) return false;
+ info->GeneralizeRepresentation(r);
return true;
}
-HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
- HValue* object,
- SmallMapList* types,
- Handle<String> name) {
- // Use monomorphic load if property lookup results in the same field index
- // for all maps. Requires special map check on the set of all handled maps.
- if (types->length() > kMaxLoadPolymorphism) return NULL;
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
+ map_->LookupDescriptor(NULL, *name_, &lookup_);
+ return LoadResult(map_);
+}
- LookupResult lookup(isolate());
- int count;
- HObjectAccess access = HObjectAccess::ForMap(); // initial value unused.
- for (count = 0; count < types->length(); ++count) {
- Handle<Map> map = types->at(count);
- if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
- HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
+ if (lookup_.IsField()) {
+ access_ = HObjectAccess::ForField(map, &lookup_, name_);
+ } else if (lookup_.IsPropertyCallbacks()) {
+ Handle<Object> callback(lookup_.GetValueFromMap(*map), isolate());
+ if (!callback->IsAccessorPair()) return false;
+ Object* getter = Handle<AccessorPair>::cast(callback)->getter();
+ if (!getter->IsJSFunction()) return false;
+ Handle<JSFunction> accessor = handle(JSFunction::cast(getter));
+ CallOptimization call_optimization(accessor);
+ // TODO(dcarney): temporary hack until crankshaft can handle api calls.
+ if (call_optimization.is_simple_api_call()) return false;
+ accessor_ = accessor;
+ } else if (lookup_.IsConstant()) {
+ constant_ = handle(lookup_.GetConstantFromMap(*map), isolate());
+ }
- if (count == 0) {
- // First time through the loop; set access and representation.
- access = new_access;
- } else if (!access.representation().IsCompatibleForLoad(
- new_access.representation())) {
- // Representations did not match.
- break;
- } else if (access.offset() != new_access.offset()) {
- // Offsets did not match.
- break;
- } else if (access.IsInobject() != new_access.IsInobject()) {
- // In-objectness did not match.
- break;
+ return true;
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
+ Handle<Map> map = map_;
+ while (map->prototype()->IsJSObject()) {
+ holder_ = handle(JSObject::cast(map->prototype()));
+ if (holder_->map()->is_deprecated()) {
+ JSObject::TryMigrateInstance(holder_);
+ }
+ map = Handle<Map>(holder_->map());
+ if (!CanInlinePropertyAccess(*map)) {
+ lookup_.NotFound();
+ return false;
}
- access = access.WithRepresentation(
- access.representation().generalize(new_access.representation()));
+ map->LookupDescriptor(*holder_, *name_, &lookup_);
+ if (lookup_.IsFound()) return LoadResult(map);
}
+ lookup_.NotFound();
+ return true;
+}
- if (count == types->length()) {
- // Everything matched; can use monomorphic load.
- BuildCheckHeapObject(object);
- HCheckMaps* checked_object = Add<HCheckMaps>(object, types);
- return BuildLoadNamedField(checked_object, access);
- }
- if (count != 0) return NULL;
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadMonomorphic() {
+ if (!CanInlinePropertyAccess(*map_)) return IsStringLength();
+ if (IsJSObjectFieldAccessor()) return true;
+ if (!LookupDescriptor()) return false;
+ if (lookup_.IsFound()) return true;
+ return LookupInPrototypes();
+}
- // Second chance: the property is on the prototype and all maps have the
- // same prototype.
- Handle<Map> map(types->at(0));
- if (!CanLoadPropertyFromPrototype(map, name, &lookup)) return NULL;
- Handle<Object> prototype(map->prototype(), isolate());
- for (count = 1; count < types->length(); ++count) {
- Handle<Map> test_map(types->at(count));
- if (!CanLoadPropertyFromPrototype(test_map, name, &lookup)) return NULL;
- if (test_map->prototype() != *prototype) return NULL;
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadAsMonomorphic(
+ SmallMapList* types) {
+ ASSERT(map_.is_identical_to(types->first()));
+ if (!CanLoadMonomorphic()) return false;
+ if (types->length() > kMaxLoadPolymorphism) return false;
+
+ if (IsStringLength()) {
+ for (int i = 1; i < types->length(); ++i) {
+ if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ }
+ return true;
+ }
+
+ if (IsArrayLength()) {
+ bool is_fast = IsFastElementsKind(map_->elements_kind());
+ for (int i = 1; i < types->length(); ++i) {
+ Handle<Map> test_map = types->at(i);
+ if (test_map->instance_type() != JS_ARRAY_TYPE) return false;
+ if (IsFastElementsKind(test_map->elements_kind()) != is_fast) {
+ return false;
+ }
+ }
+ return true;
}
- LookupInPrototypes(map, name, &lookup);
- if (!lookup.IsField()) return NULL;
+ if (IsJSObjectFieldAccessor()) {
+ InstanceType instance_type = map_->instance_type();
+ for (int i = 1; i < types->length(); ++i) {
+ if (types->at(i)->instance_type() != instance_type) return false;
+ }
+ return true;
+ }
- BuildCheckHeapObject(object);
- Add<HCheckMaps>(object, types);
+ for (int i = 1; i < types->length(); ++i) {
+ PropertyAccessInfo test_info(isolate(), types->at(i), name_);
+ if (!test_info.IsCompatibleForLoad(this)) return false;
+ }
- Handle<JSObject> holder(lookup.holder());
- Handle<Map> holder_map(holder->map());
- HValue* checked_holder = BuildCheckPrototypeMaps(
- Handle<JSObject>::cast(prototype), holder);
- return BuildLoadNamedField(checked_holder,
- HObjectAccess::ForField(holder_map, &lookup, name));
+ return true;
}
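
IsCompatibleForLoad above is what lets CanLoadAsMonomorphic collapse a polymorphic map set into one load: the infos must agree on where the property lives (same offset and in-objectness with load-compatible representations for fields, an identical accessor or constant otherwise, or a shared prototype when the property is not found locally). A toy sketch of the field case only (FieldAccess and the representation lattice are simplified stand-ins):

#include <cstdio>

// Simplified stand-in for the field half of PropertyAccessInfo.
struct FieldAccess {
  int offset;
  bool in_object;
  int representation;  // toy lattice: 0 = Smi, 1 = Tagged
};

// Two field loads can share one instruction only if they read the
// same slot; the representation is widened to cover both, mirroring
// GeneralizeRepresentation() in the real code.
bool CompatibleForLoad(FieldAccess* a, const FieldAccess& b) {
  if (a->offset != b.offset) return false;
  if (a->in_object != b.in_object) return false;
  a->representation = a->representation > b.representation
                          ? a->representation
                          : b.representation;
  return true;
}

int main() {
  FieldAccess first = {8, true, 0};        // Smi field at offset 8
  FieldAccess same_slot = {8, true, 1};    // same slot, Tagged
  FieldAccess other_slot = {16, true, 0};  // different offset
  std::printf("same slot: %d (rep now %d)\n",
              CompatibleForLoad(&first, same_slot), first.representation);
  std::printf("other slot: %d\n", CompatibleForLoad(&first, other_slot));
}
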
-// Returns true if an instance of this map can never find a property with this
-// name in its prototype chain. This means all prototypes up to the top are
-// fast and don't have the name in them. It would be good if we could optimize
-// polymorphic loads where the property is sometimes found in the prototype
-// chain.
-static bool PrototypeChainCanNeverResolve(
- Handle<Map> map, Handle<String> name) {
- Isolate* isolate = map->GetIsolate();
- Object* current = map->prototype();
- while (current != isolate->heap()->null_value()) {
- if (current->IsJSGlobalProxy() ||
- current->IsGlobalObject() ||
- !current->IsJSObject() ||
- !CanInlinePropertyAccess(JSObject::cast(current)->map()) ||
- JSObject::cast(current)->IsAccessCheckNeeded()) {
- return false;
- }
+HInstruction* HOptimizedGraphBuilder::BuildLoadMonomorphic(
+ PropertyAccessInfo* info,
+ HValue* object,
+ HInstruction* checked_object,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool can_inline_accessor) {
- LookupResult lookup(isolate);
- Map* map = JSObject::cast(current)->map();
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) return false;
- if (!lookup.IsCacheable()) return false;
- current = JSObject::cast(current)->GetPrototype();
+ HObjectAccess access = HObjectAccess::ForMap(); // bogus default
+ if (info->GetJSObjectFieldAccess(&access)) {
+ return New<HLoadNamedField>(checked_object, access);
}
- return true;
+
+ HValue* checked_holder = checked_object;
+ if (info->has_holder()) {
+ Handle<JSObject> prototype(JSObject::cast(info->map()->prototype()));
+ checked_holder = BuildCheckPrototypeMaps(prototype, info->holder());
+ }
+
+ if (!info->lookup()->IsFound()) return graph()->GetConstantUndefined();
+
+ if (info->lookup()->IsField()) {
+ return BuildLoadNamedField(checked_holder, info->access());
+ }
+
+ if (info->lookup()->IsPropertyCallbacks()) {
+ Push(checked_object);
+ if (FLAG_inline_accessors &&
+ can_inline_accessor &&
+ TryInlineGetter(info->accessor(), ast_id, return_id)) {
+ return NULL;
+ }
+ Add<HPushArgument>(Pop());
+ return New<HCallConstantFunction>(info->accessor(), 1);
+ }
+
+ ASSERT(info->lookup()->IsConstant());
+ return New<HConstant>(info->constant());
}
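
BuildLoadMonomorphic funnels every monomorphic case that used to be scattered across BuildLoadNamedMonomorphic and the polymorphic loop through a single dispatch: special JSObject field accessors first, then a prototype-holder check, then not-found (constant undefined), field load, inlined-or-called accessor, and finally constant. A toy sketch of that ordering (the enum and struct are illustrative, not the real PropertyAccessInfo):

#include <cstdio>
#include <string>

// Simplified stand-in for the states a PropertyAccessInfo can be in.
enum class Kind { kNotFound, kField, kAccessor, kConstant };

struct AccessInfo {
  Kind kind;
  bool has_holder;  // property found on the prototype chain
};

// Mirrors the dispatch order in BuildLoadMonomorphic: check the
// holder's prototype maps before any load, then pick the lowering by
// lookup kind.
std::string Lower(const AccessInfo& info) {
  std::string prefix = info.has_holder ? "check prototype maps; " : "";
  switch (info.kind) {
    case Kind::kNotFound: return prefix + "constant undefined";
    case Kind::kField:    return prefix + "load named field";
    case Kind::kAccessor: return prefix + "inline getter or call it";
    case Kind::kConstant: return prefix + "embed constant";
  }
  return "unreachable";
}

int main() {
  std::printf("%s\n", Lower({Kind::kField, false}).c_str());
  std::printf("%s\n", Lower({Kind::kAccessor, true}).c_str());
  std::printf("%s\n", Lower({Kind::kNotFound, true}).c_str());
}
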
void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
- int position,
BailoutId ast_id,
+ BailoutId return_id,
HValue* object,
SmallMapList* types,
Handle<String> name) {
- HInstruction* instr = TryLoadPolymorphicAsMonomorphic(object, types, name);
- if (instr != NULL) {
- instr->set_position(position);
- return ast_context()->ReturnInstruction(instr, ast_id);
- }
-
// Something did not match; must use a polymorphic load.
int count = 0;
HBasicBlock* join = NULL;
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- Handle<Map> map = types->at(i);
- LookupResult lookup(isolate());
- if (ComputeLoadStoreField(map, name, &lookup, false) ||
- (lookup.IsCacheable() &&
- CanInlinePropertyAccess(*map) &&
- (lookup.IsConstant() ||
- (!lookup.IsFound() &&
- PrototypeChainCanNeverResolve(map, name))))) {
+ PropertyAccessInfo info(isolate(), types->at(i), name);
+ if (info.CanLoadMonomorphic()) {
if (count == 0) {
BuildCheckHeapObject(object);
join = graph()->CreateBasicBlock();
@@ -4687,37 +4891,24 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare =
- new(zone()) HCompareMap(object, map, if_true, if_false);
- current_block()->Finish(compare);
+ HCompareMap* compare = New<HCompareMap>(
+ object, info.map(), if_true, if_false);
+ FinishCurrentBlock(compare);
set_current_block(if_true);
- // TODO(verwaest): Merge logic with BuildLoadNamedMonomorphic.
- if (lookup.IsField()) {
- HObjectAccess access = HObjectAccess::ForField(map, &lookup, name);
- HLoadNamedField* load = BuildLoadNamedField(compare, access);
- load->set_position(position);
- AddInstruction(load);
- if (!ast_context()->IsEffect()) Push(load);
- } else if (lookup.IsConstant()) {
- Handle<Object> constant(lookup.GetConstantFromMap(*map), isolate());
- HConstant* hconstant = Add<HConstant>(constant);
- if (!ast_context()->IsEffect()) Push(hconstant);
+ HInstruction* load = BuildLoadMonomorphic(
+ &info, object, compare, ast_id, return_id, FLAG_polymorphic_inlining);
+ if (load == NULL) {
+ if (HasStackOverflow()) return;
} else {
- ASSERT(!lookup.IsFound());
- if (map->prototype()->IsJSObject()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder = prototype;
- while (holder->map()->prototype()->IsJSObject()) {
- holder = handle(JSObject::cast(holder->map()->prototype()));
- }
- BuildCheckPrototypeMaps(prototype, holder);
+ if (!load->IsLinked()) {
+ AddInstruction(load);
}
- if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
+ if (!ast_context()->IsEffect()) Push(load);
}
- current_block()->Goto(join);
+ if (current_block() != NULL) Goto(join);
set_current_block(if_false);
}
}
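
For the maps it can handle, the polymorphic path above emits one HCompareMap per map, branching to a specialized load on a hit and falling through to the next compare otherwise; the final fall-through ends in a hard deoptimization or a generic load. A toy model of the resulting decision chain (plain ints stand in for maps and blocks):

#include <cstdio>
#include <vector>

// Toy model of the compare-map chain: each handled map gets a test
// that either performs its specialized load or falls through to the
// next test; the last fall-through is the deopt/generic case.
int PolymorphicLoad(int map, const std::vector<int>& handled_maps) {
  for (size_t i = 0; i < handled_maps.size(); ++i) {
    if (map == handled_maps[i]) {
      // "if_true" block: monomorphic load specialized for this map.
      return static_cast<int>(i);
    }
    // "if_false" block: fall through to the next HCompareMap.
  }
  // All compares failed: deoptimize (or do a generic load).
  return -1;
}

int main() {
  std::vector<int> handled = {10, 20, 30};
  std::printf("map 20 -> case %d\n", PolymorphicLoad(20, handled));
  std::printf("map 99 -> case %d\n", PolymorphicLoad(99, handled));
}
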
@@ -4726,16 +4917,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
+ // Because the deopt may be the only path in the polymorphic load, make sure
+ // that the environment stack on deopt matches the depth it would otherwise
+ // have had after a successful load.
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
FinishExitWithHardDeoptimization("Unknown map in polymorphic load", join);
} else {
- HValue* context = environment()->context();
- HInstruction* load = new(zone()) HLoadNamedGeneric(context, object, name);
- load->set_position(position);
- AddInstruction(load);
+ HInstruction* load = Add<HLoadNamedGeneric>(object, name);
if (!ast_context()->IsEffect()) Push(load);
if (join != NULL) {
- current_block()->Goto(join);
+ Goto(join);
} else {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
@@ -4751,7 +4943,6 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
- int position,
BailoutId assignment_id,
HValue* object,
HValue* value,
@@ -4761,8 +4952,6 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
// for all maps. Requires special map check on the set of all handled maps.
if (types->length() > kMaxStorePolymorphism) return false;
- // TODO(verwaest): Merge the checking logic with the code in
- // TryLoadPolymorphicAsMonomorphic.
LookupResult lookup(isolate());
int count;
Representation representation = Representation::None();
@@ -4770,7 +4959,7 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
for (count = 0; count < types->length(); ++count) {
Handle<Map> map = types->at(count);
// Pass false to ignore transitions.
- if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
+ if (!ComputeStoreField(map, name, &lookup, false)) break;
ASSERT(!map->is_observed());
HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
@@ -4803,7 +4992,6 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
checked_object, name, value, types->at(count - 1), &lookup),
true);
if (!ast_context()->IsEffect()) Push(value);
- store->set_position(position);
AddInstruction(store);
Add<HSimulate>(assignment_id);
if (!ast_context()->IsEffect()) Drop(1);
@@ -4813,14 +5001,13 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
- int position,
BailoutId assignment_id,
HValue* object,
HValue* value,
SmallMapList* types,
Handle<String> name) {
if (TryStorePolymorphicAsMonomorphic(
- position, assignment_id, object, value, types, name)) {
+ assignment_id, object, value, types, name)) {
return;
}
@@ -4832,7 +5019,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
Handle<Map> map = types->at(i);
LookupResult lookup(isolate());
- if (ComputeLoadStoreField(map, name, &lookup, true)) {
+ if (ComputeStoreField(map, name, &lookup)) {
if (count == 0) {
BuildCheckHeapObject(object);
join = graph()->CreateBasicBlock();
@@ -4840,19 +5027,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare =
- new(zone()) HCompareMap(object, map, if_true, if_false);
- current_block()->Finish(compare);
+ HCompareMap* compare = New<HCompareMap>(object, map, if_true, if_false);
+ FinishCurrentBlock(compare);
set_current_block(if_true);
HInstruction* instr;
CHECK_ALIVE(instr = BuildStoreNamedField(
compare, name, value, map, &lookup));
- instr->set_position(position);
// Goto will add the HSimulate for the store.
AddInstruction(instr);
if (!ast_context()->IsEffect()) Push(value);
- current_block()->Goto(join);
+ Goto(join);
set_current_block(if_false);
}
@@ -4865,14 +5050,13 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
FinishExitWithHardDeoptimization("Unknown map in polymorphic store", join);
} else {
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
- instr->set_position(position);
AddInstruction(instr);
if (join != NULL) {
if (!ast_context()->IsEffect()) {
Push(value);
}
- current_block()->Goto(join);
+ Goto(join);
} else {
// The HSimulate for the store should not see the stored value in
// effect contexts (it is not materialized at expr->id() in the
@@ -4926,8 +5110,7 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
HValue* key = environment()->ExpressionStackAt(1);
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
- HandleKeyedElementAccess(object, key, value, expr, return_id,
- expr->position(),
+ HandleKeyedElementAccess(object, key, value, expr,
true, // is_store
&has_side_effects);
Drop(3);
@@ -4966,7 +5149,7 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
Drop(2);
Add<HPushArgument>(object);
Add<HPushArgument>(value);
- instr = new(zone()) HCallConstantFunction(setter, 2);
+ instr = New<HCallConstantFunction>(setter, 2);
} else {
Drop(2);
CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
@@ -4976,15 +5159,13 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
}
} else if (types != NULL && types->length() > 1) {
Drop(2);
- return HandlePolymorphicStoreNamedField(
- expr->position(), ast_id, object, value, types, name);
+ return HandlePolymorphicStoreNamedField(ast_id, object, value, types, name);
} else {
Drop(2);
instr = BuildStoreNamedGeneric(object, name, value);
}
if (!ast_context()->IsEffect()) Push(value);
- instr->set_position(expr->position());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -5013,7 +5194,6 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
Variable* var,
HValue* value,
- int position,
BailoutId ast_id) {
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
@@ -5036,7 +5216,6 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
HInstruction* instr =
Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails());
- instr->set_position(position);
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
@@ -5045,7 +5224,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HStoreGlobalGeneric* instr =
Add<HStoreGlobalGeneric>(global_object, var->name(),
value, function_strict_mode_flag());
- instr->set_position(position);
+ USE(instr);
ASSERT(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
@@ -5074,7 +5253,6 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
case Variable::UNALLOCATED:
HandleGlobalVariableAssignment(var,
Top(),
- expr->position(),
expr->AssignmentId());
break;
@@ -5136,22 +5314,21 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* object = Top();
HValue* key = NULL;
- if ((!prop->IsStringLength() &&
- !prop->IsFunctionPrototype() &&
- !prop->key()->IsPropertyName()) ||
+ if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
prop->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(prop->key()));
key = Top();
}
- CHECK_ALIVE(PushLoad(prop, object, key, expr->position()));
+ CHECK_ALIVE(PushLoad(prop, object, key));
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
HValue* left = Pop();
HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
+ AddInstruction(instr);
+ Push(instr);
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
}
@@ -5207,7 +5384,6 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
CHECK_ALIVE(VisitForValue(expr->value()));
HandleGlobalVariableAssignment(var,
Top(),
- expr->position(),
expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
@@ -5306,9 +5482,16 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
- HThrow* instr = Add<HThrow>(value);
- instr->set_position(expr->position());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ Add<HThrow>(value);
Add<HSimulate>(expr->id());
+
+ // If the throw definitely exits the function, we can finish with a dummy
+ // control flow at this point. This is not the case if the throw is inside
+ // an inlined function which may be replaced.
+ if (call_context() == NULL) {
+ FinishExitCurrentBlock(New<HAbnormalExit>());
+ }
}
@@ -5327,6 +5510,12 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
}
+HInstruction* HGraphBuilder::AddLoadNamedField(HValue* object,
+ HObjectAccess access) {
+ return AddInstruction(BuildLoadNamedField(object, access));
+}
+
+
HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* object,
HValue* checked_string) {
if (FLAG_fold_constants && object->IsConstant()) {
@@ -5347,93 +5536,14 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
Add<HDeoptimize>("Insufficient type feedback for generic named load",
Deoptimizer::SOFT);
}
- HValue* context = environment()->context();
- return new(zone()) HLoadNamedGeneric(context, object, name);
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
- HValue* object,
- Handle<Map> map,
- Handle<JSFunction> getter,
- Handle<JSObject> holder) {
- AddCheckConstantFunction(holder, object, map);
- Add<HPushArgument>(object);
- return new(zone()) HCallConstantFunction(getter, 1);
+ return New<HLoadNamedGeneric>(object, name);
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
- HValue* object,
- Handle<String> name,
- Handle<Map> map) {
- // Handle a load from a known field.
- ASSERT(!map->is_dictionary_map());
-
- // Handle access to various length properties
- if (name->Equals(isolate()->heap()->length_string())) {
- if (map->instance_type() == JS_ARRAY_TYPE) {
- HCheckMaps* checked_object = AddCheckMap(object, map);
- return New<HLoadNamedField>(
- checked_object, HObjectAccess::ForArrayLength(map->elements_kind()));
- }
- }
-
- LookupResult lookup(isolate());
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsField()) {
- HCheckMaps* checked_object = AddCheckMap(object, map);
- ASSERT(map->IsJSObjectMap());
- return BuildLoadNamedField(
- checked_object, HObjectAccess::ForField(map, &lookup, name));
- }
-
- // Handle a load of a constant known function.
- if (lookup.IsConstant()) {
- AddCheckMap(object, map);
- Handle<Object> constant(lookup.GetConstantFromMap(*map), isolate());
- return New<HConstant>(constant);
- }
-
- if (lookup.IsFound()) {
- // Cannot handle the property, do a generic load instead.
- HValue* context = environment()->context();
- return new(zone()) HLoadNamedGeneric(context, object, name);
- }
-
- // Handle a load from a known field somewhere in the prototype chain.
- LookupInPrototypes(map, name, &lookup);
- if (lookup.IsField()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder(lookup.holder());
- Handle<Map> holder_map(holder->map());
- AddCheckMap(object, map);
- HValue* checked_holder = BuildCheckPrototypeMaps(prototype, holder);
- return BuildLoadNamedField(
- checked_holder, HObjectAccess::ForField(holder_map, &lookup, name));
- }
-
- // Handle a load of a constant function somewhere in the prototype chain.
- if (lookup.IsConstant()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder(lookup.holder());
- Handle<Map> holder_map(holder->map());
- AddCheckMap(object, map);
- BuildCheckPrototypeMaps(prototype, holder);
- Handle<Object> constant(lookup.GetConstantFromMap(*holder_map), isolate());
- return New<HConstant>(constant);
- }
-
- // No luck, do a generic load.
- HValue* context = environment()->context();
- return new(zone()) HLoadNamedGeneric(context, object, name);
-}
-
HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
HValue* key) {
- HValue* context = environment()->context();
- return new(zone()) HLoadKeyedGeneric(context, object, key);
+ return New<HLoadKeyedGeneric>(object, key);
}
@@ -5547,8 +5657,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* key,
HValue* val,
SmallMapList* maps,
- BailoutId ast_id,
- int position,
bool is_store,
KeyedAccessStoreMode store_mode,
bool* has_side_effects) {
@@ -5560,9 +5668,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
TryBuildConsolidatedElementLoad(object, key, val, maps);
if (consolidated_load != NULL) {
*has_side_effects |= consolidated_load->HasObservableSideEffects();
- if (position != RelocInfo::kNoPosition) {
- consolidated_load->set_position(position);
- }
return consolidated_load;
}
}
@@ -5619,7 +5724,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
store_mode);
}
*has_side_effects |= instr->HasObservableSideEffects();
- if (position != RelocInfo::kNoPosition) instr->set_position(position);
return is_store ? NULL : instr;
}
@@ -5632,8 +5736,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HBasicBlock* this_map = graph()->CreateBasicBlock();
HBasicBlock* other_map = graph()->CreateBasicBlock();
HCompareMap* mapcompare =
- new(zone()) HCompareMap(object, map, this_map, other_map);
- current_block()->Finish(mapcompare);
+ New<HCompareMap>(object, map, this_map, other_map);
+ FinishCurrentBlock(mapcompare);
set_current_block(this_map);
HInstruction* access = NULL;
@@ -5656,12 +5760,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
*has_side_effects |= access->HasObservableSideEffects();
// The caller will use has_side_effects and add a correct Simulate.
access->SetFlag(HValue::kHasNoObservableSideEffects);
- if (position != RelocInfo::kNoPosition) access->set_position(position);
if (!is_store) {
Push(access);
}
NoObservableSideEffectsScope scope(this);
- current_block()->GotoNoSimulate(join);
+ GotoNoSimulate(join);
set_current_block(other_map);
}
@@ -5679,8 +5782,6 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* key,
HValue* val,
Expression* expr,
- BailoutId ast_id,
- int position,
bool is_store,
bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
@@ -5702,17 +5803,18 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
}
} else if (types != NULL && !types->is_empty()) {
return HandlePolymorphicElementAccess(
- obj, key, val, types, ast_id, position, is_store,
+ obj, key, val, types, is_store,
expr->GetStoreMode(), has_side_effects);
} else {
if (is_store) {
- if (expr->IsAssignment() && expr->AsAssignment()->IsUninitialized()) {
+ if (expr->IsAssignment() &&
+ expr->AsAssignment()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed store",
Deoptimizer::SOFT);
}
instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
- if (expr->AsProperty()->IsUninitialized()) {
+ if (expr->AsProperty()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed load",
Deoptimizer::SOFT);
}
@@ -5720,7 +5822,6 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
}
AddInstruction(instr);
}
- if (position != RelocInfo::kNoPosition) instr->set_position(position);
*has_side_effects = instr->HasObservableSideEffects();
return instr;
}
@@ -5730,9 +5831,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
HValue* object,
HValue* key,
HValue* value) {
- HValue* context = environment()->context();
- return new(zone()) HStoreKeyedGeneric(
- context,
+ return New<HStoreKeyedGeneric>(
object,
key,
value,
@@ -5799,7 +5898,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
HInstruction* checked_key = Add<HBoundsCheck>(key, length);
- result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+ result = New<HAccessArgumentsAt>(elements, length, checked_key);
} else {
EnsureArgumentsArePushedForAccess();
@@ -5809,7 +5908,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
arguments_environment()->parameter_count() - 1;
HInstruction* length = Add<HConstant>(argument_count);
HInstruction* checked_key = Add<HBoundsCheck>(key, length);
- result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+ result = New<HAccessArgumentsAt>(elements, length, checked_key);
}
}
ast_context()->ReturnInstruction(result, expr->id());
@@ -5819,66 +5918,66 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
void HOptimizedGraphBuilder::PushLoad(Property* expr,
HValue* object,
- HValue* key,
- int position) {
+ HValue* key) {
ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
Push(object);
if (key != NULL) Push(key);
- BuildLoad(expr, position, expr->LoadId());
+ BuildLoad(expr, expr->LoadId());
+}
+
+
+static bool AreStringTypes(SmallMapList* types) {
+ for (int i = 0; i < types->length(); i++) {
+ if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ }
+ return true;
}
void HOptimizedGraphBuilder::BuildLoad(Property* expr,
- int position,
BailoutId ast_id) {
HInstruction* instr = NULL;
- if (expr->IsStringLength()) {
- HValue* string = Pop();
- BuildCheckHeapObject(string);
- HInstruction* checkstring =
- AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
- instr = BuildLoadStringLength(string, checkstring);
- } else if (expr->IsStringAccess()) {
+ if (expr->IsStringAccess()) {
HValue* index = Pop();
HValue* string = Pop();
- HValue* context = environment()->context();
- HInstruction* char_code =
- BuildStringCharCodeAt(string, index);
+ HInstruction* char_code = BuildStringCharCodeAt(string, index);
AddInstruction(char_code);
- instr = HStringCharFromCode::New(zone(), context, char_code);
+ instr = NewUncasted<HStringCharFromCode>(char_code);
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
BuildCheckHeapObject(function);
- instr = new(zone()) HLoadFunctionPrototype(function);
+ instr = New<HLoadFunctionPrototype>(function);
} else if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- HValue* object = Top();
+ HValue* object = Pop();
SmallMapList* types;
- bool monomorphic = ComputeReceiverTypes(expr, object, &types);
+ ComputeReceiverTypes(expr, object, &types);
+ ASSERT(types != NULL);
+
+ if (types->length() > 0) {
+ PropertyAccessInfo info(isolate(), types->first(), name);
+ if (!info.CanLoadAsMonomorphic(types)) {
+ return HandlePolymorphicLoadNamedField(
+ ast_id, expr->LoadId(), object, types, name);
+ }
- if (monomorphic) {
- Handle<Map> map = types->first();
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- AddCheckConstantFunction(holder, Top(), map);
- if (FLAG_inline_accessors &&
- TryInlineGetter(getter, ast_id, expr->LoadId())) {
- return;
- }
- Add<HPushArgument>(Pop());
- instr = new(zone()) HCallConstantFunction(getter, 1);
+ BuildCheckHeapObject(object);
+ HInstruction* checked_object;
+ if (AreStringTypes(types)) {
+ checked_object =
+ Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
} else {
- instr = BuildLoadNamedMonomorphic(Pop(), name, map);
+ checked_object = Add<HCheckMaps>(object, types);
}
- } else if (types != NULL && types->length() > 1) {
- return HandlePolymorphicLoadNamedField(
- position, ast_id, Pop(), types, name);
+ instr = BuildLoadMonomorphic(
+ &info, object, checked_object, ast_id, expr->LoadId());
+ if (instr == NULL) return;
+ if (instr->IsLinked()) return ast_context()->ReturnValue(instr);
} else {
- instr = BuildLoadNamedGeneric(Pop(), name, expr);
+ instr = BuildLoadNamedGeneric(object, name, expr);
}
} else {
@@ -5887,7 +5986,7 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr, ast_id, position,
+ obj, key, NULL, expr,
false, // is_store
&has_side_effects);
if (has_side_effects) {
@@ -5901,7 +6000,6 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
}
return ast_context()->ReturnValue(load);
}
- instr->set_position(position);
return ast_context()->ReturnInstruction(instr, ast_id);
}
@@ -5914,14 +6012,12 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
if (TryArgumentsAccess(expr)) return;
CHECK_ALIVE(VisitForValue(expr->obj()));
- if ((!expr->IsStringLength() &&
- !expr->IsFunctionPrototype() &&
- !expr->key()->IsPropertyName()) ||
+ if ((!expr->IsFunctionPrototype() && !expr->key()->IsPropertyName()) ||
expr->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(expr->key()));
}
- BuildLoad(expr, expr->position(), expr->id());
+ BuildLoad(expr, expr->id());
}
@@ -6015,22 +6111,13 @@ bool HOptimizedGraphBuilder::TryCallPolymorphicAsMonomorphic(
Handle<String> name) {
if (types->length() > kMaxCallPolymorphism) return false;
- Handle<Map> map(types->at(0));
- LookupResult lookup(isolate());
- if (!CanLoadPropertyFromPrototype(map, name, &lookup)) return false;
-
- Handle<Object> prototype(map->prototype(), isolate());
- for (int count = 1; count < types->length(); ++count) {
- Handle<Map> test_map(types->at(count));
- if (!CanLoadPropertyFromPrototype(test_map, name, &lookup)) return false;
- if (test_map->prototype() != *prototype) return false;
- }
-
- if (!expr->ComputeTarget(map, name)) return false;
+ PropertyAccessInfo info(isolate(), types->at(0), name);
+ if (!info.CanLoadAsMonomorphic(types)) return false;
+ if (!expr->ComputeTarget(info.map(), name)) return false;
BuildCheckHeapObject(receiver);
Add<HCheckMaps>(receiver, types);
- AddCheckPrototypeMaps(expr->holder(), map);
+ AddCheckPrototypeMaps(expr->holder(), info.map());
if (FLAG_trace_inlining) {
Handle<JSFunction> caller = current_info()->closure();
SmartArrayPointer<char> caller_name =
@@ -6042,8 +6129,7 @@ bool HOptimizedGraphBuilder::TryCallPolymorphicAsMonomorphic(
if (!TryInlineCall(expr)) {
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
HCallConstantFunction* call =
- new(zone()) HCallConstantFunction(expr->target(), argument_count);
- call->set_position(expr->position());
+ New<HCallConstantFunction>(expr->target(), argument_count);
PreProcessCall(call);
AddInstruction(call);
if (!ast_context()->IsEffect()) Push(call);
@@ -6107,11 +6193,9 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
number_block = graph()->CreateBasicBlock();
- HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(receiver);
- smicheck->SetSuccessorAt(0, empty_smi_block);
- smicheck->SetSuccessorAt(1, not_smi_block);
- current_block()->Finish(smicheck);
- empty_smi_block->Goto(number_block);
+ FinishCurrentBlock(New<HIsSmiAndBranch>(
+ receiver, empty_smi_block, not_smi_block));
+ Goto(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(receiver);
@@ -6122,27 +6206,24 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
HUnaryControlInstruction* compare;
if (handle_smi && map.is_identical_to(number_marker_map)) {
- compare = new(zone()) HCompareMap(
- receiver, heap_number_map, if_true, if_false);
+ compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false);
map = initial_number_map;
expr->set_number_check(
Handle<JSObject>(JSObject::cast(map->prototype())));
} else if (map.is_identical_to(string_marker_map)) {
- compare = new(zone()) HIsStringAndBranch(receiver);
- compare->SetSuccessorAt(0, if_true);
- compare->SetSuccessorAt(1, if_false);
+ compare = New<HIsStringAndBranch>(receiver, if_true, if_false);
map = initial_string_map;
expr->set_string_check(
Handle<JSObject>(JSObject::cast(map->prototype())));
} else {
- compare = new(zone()) HCompareMap(receiver, map, if_true, if_false);
+ compare = New<HCompareMap>(receiver, map, if_true, if_false);
expr->set_map_check();
}
- current_block()->Finish(compare);
+ FinishCurrentBlock(compare);
if (expr->check_type() == NUMBER_CHECK) {
- if_true->Goto(number_block);
+ Goto(if_true, number_block);
if_true = number_block;
number_block->SetJoinId(expr->id());
}
@@ -6164,14 +6245,13 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
if (HasStackOverflow()) return;
} else {
HCallConstantFunction* call =
- new(zone()) HCallConstantFunction(expr->target(), argument_count);
- call->set_position(expr->position());
+ New<HCallConstantFunction>(expr->target(), argument_count);
PreProcessCall(call);
AddInstruction(call);
if (!ast_context()->IsEffect()) Push(call);
}
- if (current_block() != NULL) current_block()->Goto(join);
+ if (current_block() != NULL) Goto(join);
set_current_block(if_false);
}
@@ -6182,18 +6262,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
// Because the deopt may be the only path in the polymorphic call, make sure
// that the environment stack matches the depth on deopt that it otherwise
// would have had after a successful call.
- Drop(argument_count - (ast_context()->IsEffect() ? 0 : 1));
+ Drop(argument_count);
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
} else {
- HValue* context = environment()->context();
- HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
- call->set_position(expr->position());
+ HCallNamed* call = New<HCallNamed>(name, argument_count);
PreProcessCall(call);
if (join != NULL) {
AddInstruction(call);
if (!ast_context()->IsEffect()) Push(call);
- current_block()->Goto(join);
+ Goto(join);
} else {
return ast_context()->ReturnInstruction(call, expr->id());
}
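
The Drop/Push change above implements the stack-balance rule spelled out in the comment: a completed call would have consumed all pushed arguments and, in a value context, left one result behind, so the deopt-only path has to reproduce exactly that depth. A small self-contained model of the bookkeeping (the vector stands in for the HEnvironment expression stack):

    #include <vector>

    int main() {
      std::vector<int> stack = {0, 1, 2};  // receiver plus two arguments
      int argument_count = 3;              // includes the receiver
      bool is_effect_context = false;
      // A successful call pops every argument and, when the result is
      // used, pushes one value; the deopt path mirrors that with a
      // dummy constant (graph()->GetConstant0() in the real code).
      stack.resize(stack.size() - argument_count);
      if (!is_effect_context) stack.push_back(0);
      return static_cast<int>(stack.size());  // depth now matches
    }
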
@@ -6282,7 +6361,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
return false;
}
-#if !V8_TARGET_ARCH_IA32
+#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
// Target must be able to use caller's context.
CompilationInfo* outer_info = current_info();
if (target->context() != outer_info->closure()->context() ||
@@ -6431,9 +6510,9 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
undefined,
function_state()->inlining_kind(),
undefined_receiver);
-#if V8_TARGET_ARCH_IA32
- // IA32 only, overwrite the caller's context in the deoptimization
- // environment with the correct one.
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+ // On IA32, ARM and MIPS only, overwrite the caller's context in the
+ // deoptimization environment with the correct one.
//
// TODO(kmillikin): implement the same inlining on other platforms so we
// can remove the unsightly ifdefs in this function.
@@ -6495,12 +6574,12 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
// return value will always evaluate to true, in a value context the
// return value is the newly allocated receiver.
if (call_context()->IsTest()) {
- current_block()->Goto(inlined_test_context()->if_true(), state);
+ Goto(inlined_test_context()->if_true(), state);
} else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(implicit_return_value, state);
+ AddLeaveInlined(implicit_return_value, state);
}
} else if (state->inlining_kind() == SETTER_CALL_RETURN) {
// Falling off the end of an inlined setter call. The returned value is
@@ -6509,21 +6588,21 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
if (call_context()->IsTest()) {
inlined_test_context()->ReturnValue(implicit_return_value);
} else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(implicit_return_value, state);
+ AddLeaveInlined(implicit_return_value, state);
}
} else {
// Falling off the end of a normal inlined function. This basically means
// returning undefined.
if (call_context()->IsTest()) {
- current_block()->Goto(inlined_test_context()->if_false(), state);
+ Goto(inlined_test_context()->if_false(), state);
} else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(undefined, state);
+ AddLeaveInlined(undefined, state);
}
}
}
@@ -6545,13 +6624,13 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
entry->RegisterReturnTarget(if_true, zone());
if_true->SetJoinId(ast_id);
HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
- if_true->Goto(true_target, function_state());
+ Goto(if_true, true_target, function_state());
}
if (if_false->HasPredecessor()) {
entry->RegisterReturnTarget(if_false, zone());
if_false->SetJoinId(ast_id);
HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
- if_false->Goto(false_target, function_state());
+ Goto(if_false, false_target, function_state());
}
set_current_block(NULL);
return true;
@@ -6654,11 +6733,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
case kMathTan:
if (expr->arguments()->length() == 1) {
HValue* argument = Pop();
- HValue* context = environment()->context();
Drop(1); // Receiver.
- HInstruction* op =
- HUnaryMathOperation::New(zone(), context, argument, id);
- op->set_position(expr->position());
+ HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
if (drop_extra) Drop(1); // Optionally drop the function.
ast_context()->ReturnInstruction(op, expr->id());
return true;
@@ -6669,8 +6745,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
HValue* right = Pop();
HValue* left = Pop();
Drop(1); // Receiver.
- HValue* context = environment()->context();
- HInstruction* op = HMul::NewImul(zone(), context, left, right);
+ HInstruction* op = HMul::NewImul(zone(), context(), left, right);
if (drop_extra) Drop(1); // Optionally drop the function.
ast_context()->ReturnInstruction(op, expr->id());
return true;
@@ -6700,7 +6775,6 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (argument_count == 2 && check_type == STRING_CHECK) {
HValue* index = Pop();
HValue* string = Pop();
- HValue* context = environment()->context();
ASSERT(!expr->holder().is_null());
BuildCheckPrototypeMaps(Call::GetPrototypeForPrimitiveCheck(
STRING_CHECK, expr->holder()->GetIsolate()),
@@ -6712,8 +6786,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
return true;
}
AddInstruction(char_code);
- HInstruction* result =
- HStringCharFromCode::New(zone(), context, char_code);
+ HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -6722,10 +6795,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
HValue* argument = Pop();
- HValue* context = environment()->context();
Drop(1); // Receiver.
- HInstruction* result =
- HStringCharFromCode::New(zone(), context, argument);
+ HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -6744,11 +6815,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
HValue* argument = Pop();
- HValue* context = environment()->context();
Drop(1); // Receiver.
- HInstruction* op =
- HUnaryMathOperation::New(zone(), context, argument, id);
- op->set_position(expr->position());
+ HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
@@ -6759,30 +6827,27 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* right = Pop();
HValue* left = Pop();
Pop(); // Pop receiver.
- HValue* context = environment()->context();
HInstruction* result = NULL;
// Use sqrt() if exponent is 0.5 or -0.5.
if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
double exponent = HConstant::cast(right)->DoubleValue();
if (exponent == 0.5) {
- result =
- HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
+ result = NewUncasted<HUnaryMathOperation>(left, kMathPowHalf);
} else if (exponent == -0.5) {
HValue* one = graph()->GetConstant1();
- HInstruction* sqrt =
- HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
- AddInstruction(sqrt);
+ HInstruction* sqrt = AddUncasted<HUnaryMathOperation>(
+ left, kMathPowHalf);
// MathPowHalf doesn't have side effects so there's no need for
// an environment simulation here.
ASSERT(!sqrt->HasObservableSideEffects());
- result = HDiv::New(zone(), context, one, sqrt);
+ result = NewUncasted<HDiv>(one, sqrt);
} else if (exponent == 2.0) {
- result = HMul::New(zone(), context, left, left);
+ result = NewUncasted<HMul>(left, left);
}
}
if (result == NULL) {
- result = HPower::New(zone(), context, left, right);
+ result = NewUncasted<HPower>(left, right);
}
ast_context()->ReturnInstruction(result, expr->id());
return true;
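
The exponent special-casing above is a standard strength reduction: x**0.5 becomes a square root, x**-0.5 a reciprocal square root, and x**2 a multiply, each far cheaper than a generic power call. A quick numeric spot-check of the identities (with a tolerance, since libm's pow need not be bit-identical, and ignoring JS edge cases such as negative zero or -Infinity):

    #include <cassert>
    #include <cmath>

    int main() {
      double x = 2.25;
      assert(std::fabs(std::pow(x, 0.5) - std::sqrt(x)) < 1e-12);
      assert(std::fabs(std::pow(x, -0.5) - 1.0 / std::sqrt(x)) < 1e-12);
      assert(std::fabs(std::pow(x, 2.0) - x * x) < 1e-12);
      return 0;
    }
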
@@ -6793,7 +6858,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
Drop(1); // Receiver.
HGlobalObject* global_object = Add<HGlobalObject>();
- HRandom* result = new(zone()) HRandom(global_object);
+ HRandom* result = New<HRandom>(global_object);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -6805,11 +6870,9 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* right = Pop();
HValue* left = Pop();
Drop(1); // Receiver.
- HValue* context = environment()->context();
HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
: HMathMinMax::kMathMax;
- HInstruction* result =
- HMathMinMax::New(zone(), context, left, right, op);
+ HInstruction* result = NewUncasted<HMathMinMax>(left, right, op);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -6820,8 +6883,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* right = Pop();
HValue* left = Pop();
Drop(1); // Receiver.
- HValue* context = environment()->context();
- HInstruction* result = HMul::NewImul(zone(), context, left, right);
+ HInstruction* result = HMul::NewImul(zone(), context(), left, right);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -6872,12 +6934,10 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
HValue* wrapped_receiver = BuildWrapReceiver(receiver, function);
- HInstruction* result =
- new(zone()) HApplyArguments(function,
- wrapped_receiver,
- length,
- elements);
- result->set_position(expr->position());
+ HInstruction* result = New<HApplyArguments>(function,
+ wrapped_receiver,
+ length,
+ elements);
ast_context()->ReturnInstruction(result, expr->id());
return true;
} else {
@@ -6903,19 +6963,15 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
}
Drop(arguments_count - 1);
- PushAndAdd(New<HPushArgument>(Pop()));
+ Push(Add<HPushArgument>(Pop()));
for (int i = 1; i < arguments_count; i++) {
- PushAndAdd(New<HPushArgument>(arguments_values->at(i)));
+ Push(Add<HPushArgument>(arguments_values->at(i)));
}
- HValue* context = environment()->context();
- HInvokeFunction* call = new(zone()) HInvokeFunction(
- context,
- function,
- known_function,
- arguments_count);
+ HInvokeFunction* call = New<HInvokeFunction>(function,
+ known_function,
+ arguments_count);
Drop(arguments_count);
- call->set_position(expr->position());
ast_context()->ReturnInstruction(call, expr->id());
return true;
}
@@ -6945,9 +7001,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HValue* context = environment()->context();
- call = new(zone()) HCallKeyed(context, key, argument_count);
- call->set_position(expr->position());
+ call = New<HCallKeyed>(key, argument_count);
Drop(argument_count + 1); // 1 is the key.
return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -6985,16 +7039,13 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// When the target has a custom call IC generator, use the IC,
// because it is likely to generate better code. Also use the IC
// when a primitive receiver check is required.
- HValue* context = environment()->context();
- call = PreProcessCall(
- new(zone()) HCallNamed(context, name, argument_count));
+ call = PreProcessCall(New<HCallNamed>(name, argument_count));
} else {
AddCheckConstantFunction(expr->holder(), receiver, map);
if (TryInlineCall(expr)) return;
call = PreProcessCall(
- new(zone()) HCallConstantFunction(expr->target(),
- argument_count));
+ New<HCallConstantFunction>(expr->target(), argument_count));
}
} else if (types != NULL && types->length() > 1) {
ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
@@ -7002,11 +7053,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
return;
} else {
- HValue* context = environment()->context();
- call = PreProcessCall(
- new(zone()) HCallNamed(context, name, argument_count));
+ call = PreProcessCall(New<HCallNamed>(name, argument_count));
}
-
} else {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
@@ -7030,9 +7078,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
if (known_global_function) {
// Push the global object instead of the global receiver because
// code generated by the full code generator expects it.
- HValue* context = environment()->context();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- PushAndAdd(global_object);
+ HGlobalObject* global_object = Add<HGlobalObject>();
+ Push(global_object);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
CHECK_ALIVE(VisitForValue(expr->expression()));
@@ -7064,16 +7111,14 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
if (CallStubCompiler::HasCustomCallGenerator(expr->target())) {
// When the target has a custom call IC generator, use the IC,
// because it is likely to generate better code.
- HValue* context = environment()->context();
- call = PreProcessCall(
- new(zone()) HCallNamed(context, var->name(), argument_count));
+ call = PreProcessCall(New<HCallNamed>(var->name(), argument_count));
} else {
- call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
- argument_count));
+ call = PreProcessCall(New<HCallKnownGlobal>(
+ expr->target(), argument_count));
}
} else {
HGlobalObject* receiver = Add<HGlobalObject>();
- PushAndAdd(New<HPushArgument>(receiver));
+ Push(Add<HPushArgument>(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
call = New<HCallGlobal>(var->name(), argument_count);
@@ -7086,8 +7131,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
HGlobalObject* global = Add<HGlobalObject>();
- HGlobalReceiver* receiver = New<HGlobalReceiver>(global);
- PushAndAdd(receiver);
+ HGlobalReceiver* receiver = Add<HGlobalReceiver>(global);
+ Push(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Add<HCheckValue>(function, expr->target());
@@ -7113,7 +7158,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
HValue* function = Top();
HGlobalObject* global_object = Add<HGlobalObject>();
HGlobalReceiver* receiver = Add<HGlobalReceiver>(global_object);
- PushAndAdd(New<HPushArgument>(receiver));
+ Push(Add<HPushArgument>(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
call = New<HCallFunction>(function, argument_count);
@@ -7121,7 +7166,6 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
}
}
- call->set_position(expr->position());
return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -7139,8 +7183,8 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
- HValue* context = environment()->context();
Factory* factory = isolate()->factory();
if (FLAG_inline_construct &&
@@ -7229,9 +7273,8 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
receiver->DeleteAndReplaceWith(NULL);
check->DeleteAndReplaceWith(NULL);
environment()->SetExpressionStackAt(receiver_index, function);
- HInstruction* call = PreProcessCall(
- new(zone()) HCallNew(context, function, argument_count));
- call->set_position(expr->position());
+ HInstruction* call =
+ PreProcessCall(New<HCallNew>(function, argument_count));
return ast_context()->ReturnInstruction(call, expr->id());
} else {
// The constructor function is both an operand to the instruction and an
@@ -7245,13 +7288,12 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
if (expr->target().is_identical_to(array_function)) {
Handle<Cell> cell = expr->allocation_info_cell();
Add<HCheckValue>(constructor, array_function);
- call = new(zone()) HCallNewArray(context, constructor, argument_count,
- cell, expr->elements_kind());
+ call = New<HCallNewArray>(constructor, argument_count,
+ cell, expr->elements_kind());
} else {
- call = new(zone()) HCallNew(context, constructor, argument_count);
+ call = New<HCallNew>(constructor, argument_count);
}
Drop(argument_count);
- call->set_position(expr->position());
return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -7373,8 +7415,7 @@ void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
CHECK_ALIVE(VisitForTypeOf(expr->expression()));
HValue* value = Pop();
- HValue* context = environment()->context();
- HInstruction* instr = new(zone()) HTypeof(context, value);
+ HInstruction* instr = New<HTypeof>(value);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -7427,7 +7468,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
bool returns_original_input,
CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
- TypeInfo info = expr->type();
+ Handle<Type> info = expr->type();
Representation rep = Representation::FromType(info);
if (rep.IsNone() || rep.IsTagged()) {
rep = Representation::Smi();
@@ -7483,6 +7524,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -7515,7 +7557,6 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
case Variable::UNALLOCATED:
HandleGlobalVariableAssignment(var,
after,
- expr->position(),
expr->AssignmentId());
break;
@@ -7567,15 +7608,13 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
HValue* object = Top();
HValue* key = NULL;
- if ((!prop->IsStringLength() &&
- !prop->IsFunctionPrototype() &&
- !prop->key()->IsPropertyName()) ||
+ if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
prop->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(prop->key()));
key = Top();
}
- CHECK_ALIVE(PushLoad(prop, object, key, expr->position()));
+ CHECK_ALIVE(PushLoad(prop, object, key));
after = BuildIncrement(returns_original_input, expr);
@@ -7611,7 +7650,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
}
BuildCheckHeapObject(string);
HValue* checkstring =
- AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
+ Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
HInstruction* length = BuildLoadStringLength(string, checkstring);
AddInstruction(length);
HInstruction* checked_index = Add<HBoundsCheck>(index, length);
@@ -7619,9 +7658,16 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
}
-// Checks if the given shift amounts have form: (sa) and (32 - sa).
+// Checks if the given shift amounts have the following forms:
+// (N1) and (N2) with N1 + N2 = 32, or (sa) and (32 - sa).
static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
HValue* const32_minus_sa) {
+ if (sa->IsConstant() && const32_minus_sa->IsConstant()) {
+ const HConstant* c1 = HConstant::cast(sa);
+ const HConstant* c2 = HConstant::cast(const32_minus_sa);
+ return c1->HasInteger32Value() && c2->HasInteger32Value() &&
+ (c1->Integer32Value() + c2->Integer32Value() == 32);
+ }
if (!const32_minus_sa->IsSub()) return false;
HSub* sub = HSub::cast(const32_minus_sa);
if (sa != sub->right()) return false;
@@ -7638,10 +7684,10 @@ static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
// directions that can be replaced by one rotate right instruction or not.
// Returns the operand and the shift amount for the rotate instruction in the
// former case.
-bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount) {
+bool HGraphBuilder::MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount) {
HShl* shl;
HShr* shr;
if (left->IsShl() && right->IsShr()) {
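
MatchRotateRight, now hoisted into HGraphBuilder, recognizes the shift-or idiom (x << sa) | (x >>> (32 - sa)), including, with the new constant case above, any constant pair summing to 32, and replaces it with a single HRor. The underlying bit identity, checked standalone on unsigned 32-bit values:

    #include <cassert>
    #include <cstdint>

    static uint32_t RotateRight(uint32_t x, uint32_t n) {
      return (x >> n) | (x << (32 - n));  // valid for 0 < n < 32
    }

    int main() {
      uint32_t x = 0xDEADBEEFu;
      for (uint32_t sa = 1; sa < 32; ++sa) {
        // The shl/shr pair matched by MatchRotateRight...
        uint32_t shift_pair = (x << sa) | (x >> (32 - sa));
        // ...is exactly a rotate right by (32 - sa).
        assert(shift_pair == RotateRight(x, 32 - sa));
      }
      return 0;
    }
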
@@ -7677,6 +7723,18 @@ bool CanBeZero(HValue* right) {
}
+HValue* HGraphBuilder::EnforceNumberType(HValue* number,
+ Handle<Type> expected) {
+ if (expected->Is(Type::Smi())) {
+ return Add<HForceRepresentation>(number, Representation::Smi());
+ }
+ if (expected->Is(Type::Signed32())) {
+ return Add<HForceRepresentation>(number, Representation::Integer32());
+ }
+ return number;
+}
+
+
HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
@@ -7687,6 +7745,33 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
}
}
+ // We put temporary values on the stack, which don't correspond to anything
+ // in baseline code. Since nothing is observable, we avoid recording those
+ // pushes with a NoObservableSideEffectsScope.
+ NoObservableSideEffectsScope no_effects(this);
+
+ Handle<Type> expected_type = *expected;
+
+ // Separate the number type from the rest.
+ Handle<Type> expected_obj = handle(Type::Intersect(
+ expected_type, handle(Type::NonNumber(), isolate())), isolate());
+ Handle<Type> expected_number = handle(Type::Intersect(
+ expected_type, handle(Type::Number(), isolate())), isolate());
+
+ // We expect to get a number.
+ // (We need to check first, since Type::None->Is(Type::Any()) == true.)
+ if (expected_obj->Is(Type::None())) {
+ ASSERT(!expected_number->Is(Type::None()));
+ return value;
+ }
+
+ if (expected_obj->Is(Type::Undefined())) {
+ // This is already done by HChange.
+ *expected = handle(Type::Union(
+ expected_number, handle(Type::Double(), isolate())), isolate());
+ return value;
+ }
+
return value;
}
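
The new early-out logic splits the expected type into its number part and its non-number part via Type::Intersect, and truncation is only safe when the non-number part is empty or just Undefined (which HChange already converts). A toy bitset model of the two intersections, assuming types behave as plain sets (V8's Type lattice is richer):

    #include <cstdint>

    using Type = uint32_t;  // toy lattice: each bit is a primitive type
    const Type kNone = 0;
    const Type kSmi = 1 << 0, kDouble = 1 << 1;
    const Type kNumber = kSmi | kDouble;
    const Type kUndefined = 1 << 2, kString = 1 << 3;
    const Type kNonNumber = kUndefined | kString;

    static Type Intersect(Type a, Type b) { return a & b; }
    static bool Is(Type a, Type b) { return (a & ~b) == 0; }  // subset

    // Mirrors the decision structure of TruncateToNumber's new prologue.
    static bool NumberExpected(Type expected) {
      Type expected_obj = Intersect(expected, kNonNumber);
      if (Is(expected_obj, kNone)) return true;       // purely a number
      if (Is(expected_obj, kUndefined)) return true;  // HChange handles it
      return false;  // might be a string or object: do not truncate
    }
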
@@ -7695,89 +7780,156 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
BinaryOperation* expr,
HValue* left,
HValue* right) {
- HValue* context = environment()->context();
Handle<Type> left_type = expr->left()->bounds().lower;
Handle<Type> right_type = expr->right()->bounds().lower;
Handle<Type> result_type = expr->bounds().lower;
Maybe<int> fixed_right_arg = expr->fixed_right_arg();
+
+ return HGraphBuilder::BuildBinaryOperation(expr->op(), left, right,
+ left_type, right_type, result_type, fixed_right_arg);
+}
+
+
+HInstruction* HGraphBuilder::BuildBinaryOperation(
+ Token::Value op,
+ HValue* left,
+ HValue* right,
+ Handle<Type> left_type,
+ Handle<Type> right_type,
+ Handle<Type> result_type,
+ Maybe<int> fixed_right_arg,
+ bool binop_stub) {
+
Representation left_rep = Representation::FromType(left_type);
Representation right_rep = Representation::FromType(right_type);
- Representation result_rep = Representation::FromType(result_type);
- if (expr->op() != Token::ADD ||
- (left->type().IsNonString() && right->type().IsNonString())) {
- // For addition we can only truncate the arguments to number if we can
- // prove that we will not end up in string concatenation mode.
- left = TruncateToNumber(left, &left_type);
- right = TruncateToNumber(right, &right_type);
- }
+ bool maybe_string_add = op == Token::ADD &&
+ (left_type->Maybe(Type::String()) ||
+ right_type->Maybe(Type::String()));
if (left_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation",
Deoptimizer::SOFT);
- // TODO(rossberg): we should be able to get rid of non-continuous defaults.
+ // TODO(rossberg): we should be able to get rid of non-continuous
+ // defaults.
left_type = handle(Type::Any(), isolate());
+ } else {
+ if (!maybe_string_add) left = TruncateToNumber(left, &left_type);
+ left_rep = Representation::FromType(left_type);
}
+
if (right_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
Deoptimizer::SOFT);
right_type = handle(Type::Any(), isolate());
+ } else {
+ if (!maybe_string_add) right = TruncateToNumber(right, &right_type);
+ right_rep = Representation::FromType(right_type);
+ }
+
+ // Special case for string addition here.
+ if (op == Token::ADD &&
+ (left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
+ if (left_type->Is(Type::String())) {
+ IfBuilder if_isstring(this);
+ if_isstring.If<HIsStringAndBranch>(left);
+ if_isstring.Then();
+ if_isstring.ElseDeopt("Expected string for LHS of binary operation");
+ } else if (left_type->Is(Type::Number())) {
+ left = BuildNumberToString(left, left_type);
+ } else {
+ ASSERT(right_type->Is(Type::String()));
+ HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT);
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ return NewUncasted<HInvokeFunction>(function, 2);
+ }
+
+ if (right_type->Is(Type::String())) {
+ IfBuilder if_isstring(this);
+ if_isstring.If<HIsStringAndBranch>(right);
+ if_isstring.Then();
+ if_isstring.ElseDeopt("Expected string for RHS of binary operation");
+ } else if (right_type->Is(Type::Number())) {
+ right = BuildNumberToString(right, right_type);
+ } else {
+ ASSERT(left_type->Is(Type::String()));
+ HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT);
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ return NewUncasted<HInvokeFunction>(function, 2);
+ }
+
+ return NewUncasted<HStringAdd>(left, right, STRING_ADD_CHECK_NONE);
+ }
+
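
The block above is the new inline fast path for '+' when feedback marks at least one operand as a string: a string-typed side just gets an HIsStringAndBranch guard (deopting if it fails), a number-typed side is converted with BuildNumberToString, and anything else falls back to the STRING_ADD_LEFT/RIGHT builtins, which perform the full ToString coercion. A sketch of that per-operand decision (names are descriptive stand-ins, not the V8 API):

    enum AddStrategy { GUARD_IS_STRING, NUMBER_TO_STRING, CALL_BUILTIN };

    // Classifies one operand of 'a + b' from its type feedback; this
    // path is only reached when the other operand is known to be a
    // string.
    static AddStrategy Classify(bool is_string, bool is_number) {
      if (is_string) return GUARD_IS_STRING;   // cheap check, may deopt
      if (is_number) return NUMBER_TO_STRING;  // inline conversion
      return CALL_BUILTIN;                     // generic ToString path
    }
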
+ if (binop_stub) {
+ left = EnforceNumberType(left, left_type);
+ right = EnforceNumberType(right, right_type);
}
+
+ Representation result_rep = Representation::FromType(result_type);
+
+ bool is_non_primitive = (left_rep.IsTagged() && !left_rep.IsSmi()) ||
+ (right_rep.IsTagged() && !right_rep.IsSmi());
+
HInstruction* instr = NULL;
- switch (expr->op()) {
- case Token::ADD:
- if (left_type->Is(Type::String()) && right_type->Is(Type::String())) {
- BuildCheckHeapObject(left);
- AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
- BuildCheckHeapObject(right);
- AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
- instr = HStringAdd::New(zone(), context, left, right);
- } else {
- instr = HAdd::New(zone(), context, left, right);
- }
- break;
- case Token::SUB:
- instr = HSub::New(zone(), context, left, right);
- break;
- case Token::MUL:
- instr = HMul::New(zone(), context, left, right);
- break;
- case Token::MOD:
- instr = HMod::New(zone(), context, left, right, fixed_right_arg);
- break;
- case Token::DIV:
- instr = HDiv::New(zone(), context, left, right);
- break;
- case Token::BIT_XOR:
- case Token::BIT_AND:
- instr = NewUncasted<HBitwise>(expr->op(), left, right);
- break;
- case Token::BIT_OR: {
- HValue* operand, *shift_amount;
- if (left_type->Is(Type::Signed32()) &&
- right_type->Is(Type::Signed32()) &&
- MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = new(zone()) HRor(context, operand, shift_amount);
- } else {
- instr = NewUncasted<HBitwise>(expr->op(), left, right);
+ // Only the stub is allowed to call into the runtime, since otherwise we would
+ // inline several instructions (including the two pushes) for every tagged
+ // operation in optimized code, which is more expensive than a stub call.
+ if (binop_stub && is_non_primitive) {
+ HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op));
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ instr = NewUncasted<HInvokeFunction>(function, 2);
+ } else {
+ switch (op) {
+ case Token::ADD:
+ instr = NewUncasted<HAdd>(left, right);
+ break;
+ case Token::SUB:
+ instr = NewUncasted<HSub>(left, right);
+ break;
+ case Token::MUL:
+ instr = NewUncasted<HMul>(left, right);
+ break;
+ case Token::MOD:
+ instr = NewUncasted<HMod>(left, right, fixed_right_arg);
+ break;
+ case Token::DIV:
+ instr = NewUncasted<HDiv>(left, right);
+ break;
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ instr = NewUncasted<HBitwise>(op, left, right);
+ break;
+ case Token::BIT_OR: {
+ HValue* operand, *shift_amount;
+ if (left_type->Is(Type::Signed32()) &&
+ right_type->Is(Type::Signed32()) &&
+ MatchRotateRight(left, right, &operand, &shift_amount)) {
+ instr = NewUncasted<HRor>(operand, shift_amount);
+ } else {
+ instr = NewUncasted<HBitwise>(op, left, right);
+ }
+ break;
}
- break;
+ case Token::SAR:
+ instr = NewUncasted<HSar>(left, right);
+ break;
+ case Token::SHR:
+ instr = NewUncasted<HShr>(left, right);
+ if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
+ CanBeZero(right)) {
+ graph()->RecordUint32Instruction(instr);
+ }
+ break;
+ case Token::SHL:
+ instr = NewUncasted<HShl>(left, right);
+ break;
+ default:
+ UNREACHABLE();
}
- case Token::SAR:
- instr = HSar::New(zone(), context, left, right);
- break;
- case Token::SHR:
- instr = HShr::New(zone(), context, left, right);
- if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
- CanBeZero(right)) {
- graph()->RecordUint32Instruction(instr);
- }
- break;
- case Token::SHL:
- instr = HShl::New(zone(), context, left, right);
- break;
- default:
- UNREACHABLE();
}
if (instr->IsBinaryOperation()) {
@@ -7785,6 +7937,19 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
binop->set_observed_input_representation(1, left_rep);
binop->set_observed_input_representation(2, right_rep);
binop->initialize_output_representation(result_rep);
+ if (binop_stub) {
+ // A stub should not call into another stub.
+ instr->SetFlag(HValue::kCannotBeTagged);
+ // Truncation should already happen on the HForceRepresentation.
+ if (left->IsForceRepresentation()) {
+ left->CopyFlag(HValue::kTruncatingToSmi, instr);
+ left->CopyFlag(HValue::kTruncatingToInt32, instr);
+ }
+ if (right->IsForceRepresentation()) {
+ right->CopyFlag(HValue::kTruncatingToSmi, instr);
+ right->CopyFlag(HValue::kTruncatingToInt32, instr);
+ }
+ }
}
return instr;
}
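
When building the binary-op stub itself, the truncation flags are copied onto the HForceRepresentation inputs so that the truncating conversion happens there. The conversion these flags license is, in effect, ECMAScript's ToInt32: reduce modulo 2^32, then wrap into the signed range. A standalone reference version of that truncation, offered as an illustration rather than V8's actual routine:

    #include <cmath>
    #include <cstdint>

    static int32_t DoubleToInt32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;
      const double two32 = 4294967296.0;  // 2^32
      double m = std::fmod(std::trunc(d), two32);
      if (m < 0) m += two32;              // normalize into [0, 2^32)
      return (m >= 2147483648.0)          // wrap [2^31, 2^32) negative
                 ? static_cast<int32_t>(m - two32)
                 : static_cast<int32_t>(m);
    }
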
@@ -7874,9 +8039,9 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
HBasicBlock* eval_right = graph()->CreateBasicBlock();
ToBooleanStub::Types expected(expr->left()->to_boolean_types());
HBranch* test = is_logical_and
- ? new(zone()) HBranch(left_value, expected, eval_right, empty_block)
- : new(zone()) HBranch(left_value, expected, empty_block, eval_right);
- current_block()->Finish(test);
+ ? New<HBranch>(left_value, expected, eval_right, empty_block)
+ : New<HBranch>(left_value, expected, empty_block, eval_right);
+ FinishCurrentBlock(test);
set_current_block(eval_right);
Drop(1); // Value of the left subexpression.
@@ -7933,10 +8098,10 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
HInstruction* instr = BuildBinaryOperation(expr, left, right);
- instr->set_position(expr->position());
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -7945,9 +8110,9 @@ void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
Expression* sub_expr,
Handle<String> check) {
CHECK_ALIVE(VisitForTypeOf(sub_expr));
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
HValue* value = Pop();
- HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
- instr->set_position(expr->position());
+ HTypeofIsAndBranch* instr = New<HTypeofIsAndBranch>(value, check);
return ast_context()->ReturnControl(instr, expr->id());
}
@@ -7969,6 +8134,8 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+
// Check for a few fast cases. The AST visiting behavior must be in sync
// with the full codegen: We don't push both left and right values onto
// the expression stack when one side is a special-case literal.
@@ -7991,9 +8158,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HValue* value = Pop();
Literal* literal = expr->right()->AsLiteral();
Handle<String> rhs = Handle<String>::cast(literal->value());
- HClassOfTestAndBranch* instr =
- new(zone()) HClassOfTestAndBranch(value, rhs);
- instr->set_position(expr->position());
+ HClassOfTestAndBranch* instr = New<HClassOfTestAndBranch>(value, rhs);
return ast_context()->ReturnControl(instr, expr->id());
}
@@ -8007,7 +8172,6 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
- HValue* context = environment()->context();
HValue* right = Pop();
HValue* left = Pop();
Token::Value op = expr->op();
@@ -8015,7 +8179,6 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
if (IsLiteralCompareBool(isolate(), left, op, right)) {
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
- result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
}
@@ -8046,14 +8209,12 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// If the target is not null we have found a known global function that is
// assumed to stay the same for this instanceof.
if (target.is_null()) {
- HInstanceOf* result = new(zone()) HInstanceOf(context, left, right);
- result->set_position(expr->position());
+ HInstanceOf* result = New<HInstanceOf>(left, right);
return ast_context()->ReturnInstruction(result, expr->id());
} else {
Add<HCheckValue>(right, target);
HInstanceOfKnownGlobal* result =
- new(zone()) HInstanceOfKnownGlobal(context, left, target);
- result->set_position(expr->position());
+ New<HInstanceOfKnownGlobal>(left, target);
return ast_context()->ReturnInstruction(result, expr->id());
}
@@ -8065,8 +8226,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Add<HPushArgument>(right);
// TODO(olivf) InvokeFunction produces a check for the parameter count,
// even though we are certain to pass the correct number of arguments here.
- HInstruction* result = new(zone()) HInvokeFunction(context, function, 2);
- result->set_position(expr->position());
+ HInstruction* result = New<HInvokeFunction>(function, 2);
return ast_context()->ReturnInstruction(result, expr->id());
}
@@ -8090,16 +8250,14 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
AddCheckMap(right, map);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
- result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
BuildCheckHeapObject(left);
- AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone()));
+ Add<HCheckInstanceType>(left, HCheckInstanceType::IS_SPEC_OBJECT);
BuildCheckHeapObject(right);
- AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
+ Add<HCheckInstanceType>(right, HCheckInstanceType::IS_SPEC_OBJECT);
HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
+ New<HCompareObjectEqAndBranch>(left, right);
return ast_context()->ReturnControl(result, expr->id());
}
}
@@ -8109,26 +8267,30 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
} else if (combined_type->Is(Type::InternalizedString()) &&
Token::IsEqualityOp(op)) {
BuildCheckHeapObject(left);
- AddInstruction(HCheckInstanceType::NewIsInternalizedString(left, zone()));
+ Add<HCheckInstanceType>(left, HCheckInstanceType::IS_INTERNALIZED_STRING);
BuildCheckHeapObject(right);
- AddInstruction(HCheckInstanceType::NewIsInternalizedString(right, zone()));
+ Add<HCheckInstanceType>(right, HCheckInstanceType::IS_INTERNALIZED_STRING);
HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
+ New<HCompareObjectEqAndBranch>(left, right);
+ return ast_context()->ReturnControl(result, expr->id());
+ } else if (combined_type->Is(Type::String())) {
+ BuildCheckHeapObject(left);
+ Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING);
+ BuildCheckHeapObject(right);
+ Add<HCheckInstanceType>(right, HCheckInstanceType::IS_STRING);
+ HStringCompareAndBranch* result =
+ New<HStringCompareAndBranch>(left, right, op);
return ast_context()->ReturnControl(result, expr->id());
} else {
if (combined_rep.IsTagged() || combined_rep.IsNone()) {
- HCompareGeneric* result =
- new(zone()) HCompareGeneric(context, left, right, op);
+ HCompareGeneric* result = New<HCompareGeneric>(left, right, op);
result->set_observed_input_representation(1, left_rep);
result->set_observed_input_representation(2, right_rep);
- result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
HCompareNumericAndBranch* result =
- new(zone()) HCompareNumericAndBranch(left, right, op);
+ New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
- result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
}
}
@@ -8142,6 +8304,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
if (expr->op() == Token::EQ_STRICT) {
@@ -8150,7 +8313,6 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
: graph()->GetConstantUndefined();
HCompareObjectEqAndBranch* instr =
New<HCompareObjectEqAndBranch>(value, nil_constant);
- instr->set_position(expr->position());
return ast_context()->ReturnControl(instr, expr->id());
} else {
ASSERT_EQ(Token::EQ, expr->op());
@@ -8158,7 +8320,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
? handle(Type::Any(), isolate_)
: expr->combined_type();
HIfContinuation continuation;
- BuildCompareNil(value, type, expr->position(), &continuation);
+ BuildCompareNil(value, type, &continuation);
return ast_context()->ReturnContinuation(&continuation, expr->id());
}
}
@@ -8171,49 +8333,28 @@ HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
return New<HConstant>(
function_state()->compilation_info()->closure());
} else {
- return new(zone()) HThisFunction;
+ return New<HThisFunction>();
}
}
HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
Handle<JSObject> boilerplate_object,
- Handle<Object> allocation_site_object,
- AllocationSiteMode mode) {
+ AllocationSiteContext* site_context) {
NoObservableSideEffectsScope no_effects(this);
-
- Handle<FixedArrayBase> elements(boilerplate_object->elements());
- int object_size = boilerplate_object->map()->instance_size();
- int object_offset = object_size;
-
InstanceType instance_type = boilerplate_object->map()->instance_type();
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- AllocationSite::CanTrack(instance_type);
-
- // If using allocation sites, then the payload on the site should already
- // be filled in as a valid (boilerplate) array.
- ASSERT(!create_allocation_site_info ||
- AllocationSite::cast(*allocation_site_object)->IsLiteralSite());
-
- if (create_allocation_site_info) {
- object_size += AllocationMemento::kSize;
- }
-
ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
+
HType type = instance_type == JS_ARRAY_TYPE
? HType::JSArray() : HType::JSObject();
- HValue* object_size_constant = Add<HConstant>(object_size);
+ HValue* object_size_constant = Add<HConstant>(
+ boilerplate_object->map()->instance_size());
HInstruction* object = Add<HAllocate>(object_size_constant, type,
isolate()->heap()->GetPretenureMode(), instance_type);
-
BuildEmitObjectHeader(boilerplate_object, object);
- if (create_allocation_site_info) {
- HInstruction* allocation_site = Add<HConstant>(allocation_site_object);
- BuildCreateAllocationMemento(object, object_offset, allocation_site);
- }
-
+ Handle<FixedArrayBase> elements(boilerplate_object->elements());
int elements_size = (elements->length() > 0 &&
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
@@ -8231,15 +8372,15 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
}
BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements);
-
// Copy object elements if non-COW.
if (object_elements != NULL) {
- BuildEmitElements(boilerplate_object, elements, object_elements);
+ BuildEmitElements(boilerplate_object, elements, object_elements,
+ site_context);
}
// Copy in-object properties.
if (boilerplate_object->map()->NumberOfFields() != 0) {
- BuildEmitInObjectProperties(boilerplate_object, object);
+ BuildEmitInObjectProperties(boilerplate_object, object, site_context);
}
return object;
}
@@ -8291,7 +8432,8 @@ void HOptimizedGraphBuilder::BuildInitElementsInObjectHeader(
void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Handle<JSObject> boilerplate_object,
- HInstruction* object) {
+ HInstruction* object,
+ AllocationSiteContext* site_context) {
Handle<DescriptorArray> descriptors(
boilerplate_object->map()->instance_descriptors());
int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
@@ -8315,9 +8457,10 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ Handle<AllocationSite> current_site = site_context->EnterNewScope();
HInstruction* result =
- BuildFastLiteral(value_object,
- Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE);
+ BuildFastLiteral(value_object, site_context);
+ site_context->ExitScope(current_site, value_object);
Add<HStoreNamedField>(object, access, result);
} else {
Representation representation = details.representation();
@@ -8326,6 +8469,12 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
+ // TODO(mvstanton): This heap number alloc does not have a corresponding
+ // AllocationSite. That is okay because
+ // 1) it's a child object of another object with a valid allocation site
+ // 2) we can just use the mode of the parent object for pretenuring
+ // The TODO is to replace GetPretenureMode() with
+ // site_context->top()->GetPretenureMode().
HInstruction* double_box =
Add<HAllocate>(heap_number_constant, HType::HeapNumber(),
isolate()->heap()->GetPretenureMode(), HEAP_NUMBER_TYPE);
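
BuildFastLiteral now threads an AllocationSiteContext through the recursive copy of nested literals; every recursive call is bracketed by EnterNewScope/ExitScope so that each nested boilerplate object is paired with its own allocation site. A schematic of that bracketing pattern (Site and Ctx are stand-ins, not V8 classes):

    #include <cstdio>

    struct Site { int depth; };

    struct Ctx {
      int depth = 0;
      Site EnterNewScope() { return Site{++depth}; }  // descend a level
      void ExitScope(Site s, const char* obj) {       // ascend, record
        std::printf("site at depth %d paired with %s\n", s.depth, obj);
        --depth;
      }
    };

    // Mirrors the bracketing around each recursive BuildFastLiteral.
    static void EmitNested(Ctx* ctx, const char* boilerplate) {
      Site current = ctx->EnterNewScope();
      // ... the recursive BuildFastLiteral(boilerplate, ctx) goes here ...
      ctx->ExitScope(current, boilerplate);
    }

    int main() {
      Ctx ctx;
      EmitNested(&ctx, "inner literal");
      return 0;
    }
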
@@ -8355,7 +8504,8 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
void HOptimizedGraphBuilder::BuildEmitElements(
Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
- HValue* object_elements) {
+ HValue* object_elements,
+ AllocationSiteContext* site_context) {
ElementsKind kind = boilerplate_object->map()->elements_kind();
int elements_length = elements->length();
HValue* object_elements_length = Add<HConstant>(elements_length);
@@ -8365,7 +8515,8 @@ void HOptimizedGraphBuilder::BuildEmitElements(
if (elements->IsFixedDoubleArray()) {
BuildEmitFixedDoubleArray(elements, kind, object_elements);
} else if (elements->IsFixedArray()) {
- BuildEmitFixedArray(elements, kind, object_elements);
+ BuildEmitFixedArray(elements, kind, object_elements,
+ site_context);
} else {
UNREACHABLE();
}
@@ -8394,7 +8545,8 @@ void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
void HOptimizedGraphBuilder::BuildEmitFixedArray(
Handle<FixedArrayBase> elements,
ElementsKind kind,
- HValue* object_elements) {
+ HValue* object_elements,
+ AllocationSiteContext* site_context) {
HInstruction* boilerplate_elements = Add<HConstant>(elements);
int elements_length = elements->length();
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
@@ -8403,9 +8555,10 @@ void HOptimizedGraphBuilder::BuildEmitFixedArray(
HValue* key_constant = Add<HConstant>(i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ Handle<AllocationSite> current_site = site_context->EnterNewScope();
HInstruction* result =
- BuildFastLiteral(value_object,
- Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE);
+ BuildFastLiteral(value_object, site_context);
+ site_context->ExitScope(current_site, value_object);
Add<HStoreKeyed>(object_elements, key_constant, result, kind);
} else {
HInstruction* value_instruction =
@@ -8568,7 +8721,7 @@ void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsSmiAndBranch* result = new(zone()) HIsSmiAndBranch(value);
+ HIsSmiAndBranch* result = New<HIsSmiAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8578,9 +8731,9 @@ void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
+ New<HHasInstanceTypeAndBranch>(value,
+ FIRST_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8590,7 +8743,7 @@ void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
+ New<HHasInstanceTypeAndBranch>(value, JS_FUNCTION_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8600,7 +8753,7 @@ void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasCachedArrayIndexAndBranch* result =
- new(zone()) HHasCachedArrayIndexAndBranch(value);
+ New<HHasCachedArrayIndexAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8610,7 +8763,7 @@ void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_ARRAY_TYPE);
+ New<HHasInstanceTypeAndBranch>(value, JS_ARRAY_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8620,7 +8773,7 @@ void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_REGEXP_TYPE);
+ New<HHasInstanceTypeAndBranch>(value, JS_REGEXP_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8629,7 +8782,7 @@ void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsObjectAndBranch* result = new(zone()) HIsObjectAndBranch(value);
+ HIsObjectAndBranch* result = New<HIsObjectAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8643,8 +8796,7 @@ void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsUndetectableAndBranch* result =
- new(zone()) HIsUndetectableAndBranch(value);
+ HIsUndetectableAndBranch* result = New<HIsUndetectableAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8665,7 +8817,7 @@ void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
: graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
} else {
- return ast_context()->ReturnControl(new(zone()) HIsConstructCallAndBranch,
+ return ast_context()->ReturnControl(New<HIsConstructCallAndBranch>(),
call->id());
}
}
@@ -8695,8 +8847,8 @@ void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
HInstruction* checked_index = Add<HBoundsCheck>(index, length);
- HAccessArgumentsAt* result =
- new(zone()) HAccessArgumentsAt(elements, length, checked_index);
+ HAccessArgumentsAt* result = New<HAccessArgumentsAt>(
+ elements, length, checked_index);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8713,7 +8865,7 @@ void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HValueOf* result = new(zone()) HValueOf(value);
+ HValueOf* result = New<HValueOf>(value);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8724,7 +8876,7 @@ void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value()));
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* date = Pop();
- HDateField* result = new(zone()) HDateField(date, index);
+ HDateField* result = New<HDateField>(date, index);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8738,7 +8890,7 @@ void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
HValue* value = Pop();
HValue* index = Pop();
HValue* string = Pop();
- HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
+ HSeqStringSetChar* result = New<HSeqStringSetChar>(
String::ONE_BYTE_ENCODING, string, index, value);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8753,7 +8905,7 @@ void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
HValue* value = Pop();
HValue* index = Pop();
HValue* string = Pop();
- HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
+ HSeqStringSetChar* result = New<HSeqStringSetChar>(
String::TWO_BYTE_ENCODING, string, index, value);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8766,31 +8918,28 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
HValue* value = Pop();
HValue* object = Pop();
  // Check if object is not a smi.
- HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(object);
HBasicBlock* if_smi = graph()->CreateBasicBlock();
HBasicBlock* if_heap_object = graph()->CreateBasicBlock();
HBasicBlock* join = graph()->CreateBasicBlock();
- smicheck->SetSuccessorAt(0, if_smi);
- smicheck->SetSuccessorAt(1, if_heap_object);
- current_block()->Finish(smicheck);
- if_smi->Goto(join);
+ FinishCurrentBlock(New<HIsSmiAndBranch>(object, if_smi, if_heap_object));
+ Goto(if_smi, join);
// Check if object is a JSValue.
set_current_block(if_heap_object);
HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(object, JS_VALUE_TYPE);
+ New<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE);
HBasicBlock* if_js_value = graph()->CreateBasicBlock();
HBasicBlock* not_js_value = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_js_value);
typecheck->SetSuccessorAt(1, not_js_value);
- current_block()->Finish(typecheck);
- not_js_value->Goto(join);
+ FinishCurrentBlock(typecheck);
+ Goto(not_js_value, join);
// Create in-object property store to kValueOffset.
set_current_block(if_js_value);
Add<HStoreNamedField>(object,
HObjectAccess::ForJSObjectOffset(JSValue::kValueOffset), value);
- if_js_value->Goto(join);
+ Goto(if_js_value, join);
join->SetJoinId(call->id());
set_current_block(join);
return ast_context()->ReturnValue(value);
@@ -8814,7 +8963,7 @@ void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* char_code = Pop();
- HInstruction* result = New<HStringCharFromCode>(char_code);
+ HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8828,7 +8977,7 @@ void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
HValue* string = Pop();
HInstruction* char_code = BuildStringCharCodeAt(string, index);
AddInstruction(char_code);
- HInstruction* result = New<HStringCharFromCode>(char_code);
+ HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8855,7 +9004,7 @@ void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
// Fast support for Math.random().
void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
HGlobalObject* global_object = Add<HGlobalObject>();
- HRandom* result = new(zone()) HRandom(global_object);
+ HRandom* result = New<HRandom>(global_object);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8867,9 +9016,7 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HValue* context = environment()->context();
- HInstruction* result = HStringAdd::New(
- zone(), context, left, right, STRING_ADD_CHECK_BOTH);
+ HInstruction* result = New<HStringAdd>(left, right, STRING_ADD_CHECK_BOTH);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8878,8 +9025,7 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3);
+ HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
Drop(3);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8889,9 +9035,7 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::StringCompare, 2);
+ HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2);
Drop(2);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8901,8 +9045,7 @@ void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
ASSERT_EQ(4, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4);
+ HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
Drop(4);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8912,9 +9055,7 @@ void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3);
+ HCallStub* result = New<HCallStub>(CodeStub::RegExpConstructResult, 3);
Drop(3);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8929,12 +9070,11 @@ void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
// Fast support for number to string.
void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::NumberToString, 1);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* number = Pop();
+ HValue* result = BuildNumberToString(
+ number, handle(Type::Number(), isolate()));
+ return ast_context()->ReturnValue(result);
}
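
Number-to-string is now built inline through BuildNumberToString (declared further down in this diff) instead of going through a code stub. On a number-string-cache miss the helper presumably bottoms out in the new HCallRuntime specialization added below; a hedged sketch, with kNumberToStringSkipCache as an assumed callee:

    // Fallback on a cache miss (sketch only):
    Add<HCallRuntime>(isolate()->factory()->empty_string(),
                      Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
                      1);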
@@ -8953,25 +9093,25 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
// Branch for function proxies, or other non-functions.
HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(function, JS_FUNCTION_TYPE);
+ New<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
HBasicBlock* if_jsfunction = graph()->CreateBasicBlock();
HBasicBlock* if_nonfunction = graph()->CreateBasicBlock();
HBasicBlock* join = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_jsfunction);
typecheck->SetSuccessorAt(1, if_nonfunction);
- current_block()->Finish(typecheck);
+ FinishCurrentBlock(typecheck);
set_current_block(if_jsfunction);
HInstruction* invoke_result = Add<HInvokeFunction>(function, arg_count);
Drop(arg_count);
Push(invoke_result);
- if_jsfunction->Goto(join);
+ Goto(if_jsfunction, join);
set_current_block(if_nonfunction);
HInstruction* call_result = Add<HCallFunction>(function, arg_count);
Drop(arg_count);
Push(call_result);
- if_nonfunction->Goto(join);
+ Goto(if_nonfunction, join);
set_current_block(join);
join->SetJoinId(call->id());
@@ -8986,7 +9126,7 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* result = HPower::New(zone(), context(), left, right);
+ HInstruction* result = NewUncasted<HPower>(left, right);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8994,9 +9134,7 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+ HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::SIN);
Drop(1);
return ast_context()->ReturnInstruction(result, call->id());
@@ -9006,9 +9144,7 @@ void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+ HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::COS);
Drop(1);
return ast_context()->ReturnInstruction(result, call->id());
@@ -9018,9 +9154,7 @@ void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+ HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::TAN);
Drop(1);
return ast_context()->ReturnInstruction(result, call->id());
@@ -9030,9 +9164,7 @@ void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+ HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::LOG);
Drop(1);
return ast_context()->ReturnInstruction(result, call->id());
@@ -9043,9 +9175,7 @@ void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HValue* context = environment()->context();
- HInstruction* result =
- HUnaryMathOperation::New(zone(), context, value, kMathSqrt);
+ HInstruction* result = New<HUnaryMathOperation>(value, kMathSqrt);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -9060,7 +9190,7 @@ void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HGetCachedArrayIndex* result = new(zone()) HGetCachedArrayIndex(value);
+ HGetCachedArrayIndex* result = New<HGetCachedArrayIndex>(value);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -9083,7 +9213,7 @@ void HOptimizedGraphBuilder::GenerateGeneratorThrow(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
CallRuntime* call) {
- AddInstruction(new(zone()) HDebugBreak());
+ Add<HDebugBreak>();
return ast_context()->ReturnValue(graph()->GetConstant0());
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index c1dafa8b5a..b5046bd001 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -30,6 +30,7 @@
#include "v8.h"
+#include "accessors.h"
#include "allocation.h"
#include "ast.h"
#include "compiler.h"
@@ -109,7 +110,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
bool IsFinished() const { return end_ != NULL; }
void AddPhi(HPhi* phi);
void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr);
+ void AddInstruction(HInstruction* instr, int position);
bool Dominates(HBasicBlock* other) const;
int LoopNestingDepth() const;
@@ -132,30 +133,18 @@ class HBasicBlock V8_FINAL : public ZoneObject {
void SetJoinId(BailoutId ast_id);
- void Finish(HControlInstruction* last);
- void FinishExit(HControlInstruction* instruction);
- void Goto(HBasicBlock* block,
- FunctionState* state = NULL,
- bool add_simulate = true);
- void GotoNoSimulate(HBasicBlock* block) {
- Goto(block, NULL, false);
- }
-
int PredecessorIndexOf(HBasicBlock* predecessor) const;
HPhi* AddNewPhi(int merged_index);
HSimulate* AddNewSimulate(BailoutId ast_id,
+ int position,
RemovableSimulate removable = FIXED_SIMULATE) {
HSimulate* instr = CreateSimulate(ast_id, removable);
- AddInstruction(instr);
+ AddInstruction(instr, position);
return instr;
}
void AssignCommonDominator(HBasicBlock* other);
void AssignLoopSuccessorDominators();
- // Add the inlined function exit sequence, adding an HLeaveInlined
- // instruction and updating the bailout environment.
- void AddLeaveInlined(HValue* return_value, FunctionState* state);
-
// If a target block is tagged as an inline function return, all
// predecessors should contain the inlined exit sequence:
//
@@ -169,8 +158,13 @@ class HBasicBlock V8_FINAL : public ZoneObject {
}
HBasicBlock* inlined_entry_block() { return inlined_entry_block_; }
- bool IsDeoptimizing() const { return is_deoptimizing_; }
- void MarkAsDeoptimizing() { is_deoptimizing_ = true; }
+ bool IsDeoptimizing() const {
+ return end() != NULL && end()->IsDeoptimize();
+ }
+
+ void MarkUnreachable();
+ bool IsUnreachable() const { return !is_reachable_; }
+ bool IsReachable() const { return is_reachable_; }
bool IsLoopSuccessorDominator() const {
return dominates_loop_successors_;
@@ -185,14 +179,30 @@ class HBasicBlock V8_FINAL : public ZoneObject {
void Verify();
#endif
- private:
+ protected:
friend class HGraphBuilder;
+ HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
+ void Finish(HControlInstruction* last, int position);
+ void FinishExit(HControlInstruction* instruction, int position);
+ void Goto(HBasicBlock* block,
+ int position,
+ FunctionState* state = NULL,
+ bool add_simulate = true);
+ void GotoNoSimulate(HBasicBlock* block, int position) {
+ Goto(block, position, NULL, false);
+ }
+
+ // Add the inlined function exit sequence, adding an HLeaveInlined
+ // instruction and updating the bailout environment.
+ void AddLeaveInlined(HValue* return_value,
+ FunctionState* state,
+ int position);
+
+ private:
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
- HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
-
int block_id_;
HGraph* graph_;
ZoneList<HPhi*> phis_;
@@ -214,7 +224,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
// For blocks marked as inline return target: the block with HEnterInlined.
HBasicBlock* inlined_entry_block_;
bool is_inline_return_target_ : 1;
- bool is_deoptimizing_ : 1;
+ bool is_reachable_ : 1;
bool dominates_loop_successors_ : 1;
bool is_osr_entry_ : 1;
};
@@ -316,7 +326,7 @@ class HGraph V8_FINAL : public ZoneObject {
HBasicBlock* entry_block() const { return entry_block_; }
HEnvironment* start_environment() const { return start_environment_; }
- void FinalizeUniqueValueIds();
+ void FinalizeUniqueness();
bool ProcessArgumentsObject();
void OrderBlocks();
void AssignDominators();
@@ -332,10 +342,7 @@ class HGraph V8_FINAL : public ZoneObject {
void CollectPhis();
- void set_undefined_constant(HConstant* constant) {
- undefined_constant_.set(constant);
- }
- HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
+ HConstant* GetConstantUndefined();
HConstant* GetConstant0();
HConstant* GetConstant1();
HConstant* GetConstantMinus1();
@@ -405,14 +412,6 @@ class HGraph V8_FINAL : public ZoneObject {
use_optimistic_licm_ = value;
}
- bool has_soft_deoptimize() {
- return has_soft_deoptimize_;
- }
-
- void set_has_soft_deoptimize(bool value) {
- has_soft_deoptimize_ = value;
- }
-
void MarkRecursive() {
is_recursive_ = true;
}
@@ -458,6 +457,7 @@ class HGraph V8_FINAL : public ZoneObject {
bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
private:
+ HConstant* ReinsertConstantIfNecessary(HConstant* constant);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
int32_t integer_value);
@@ -477,7 +477,7 @@ class HGraph V8_FINAL : public ZoneObject {
ZoneList<HValue*> values_;
ZoneList<HPhi*>* phi_list_;
ZoneList<HInstruction*>* uint32_instructions_;
- SetOncePointer<HConstant> undefined_constant_;
+ SetOncePointer<HConstant> constant_undefined_;
SetOncePointer<HConstant> constant_0_;
SetOncePointer<HConstant> constant_1_;
SetOncePointer<HConstant> constant_minus1_;
@@ -495,7 +495,6 @@ class HGraph V8_FINAL : public ZoneObject {
bool is_recursive_;
bool use_optimistic_licm_;
- bool has_soft_deoptimize_;
bool depends_on_empty_array_proto_elements_;
int type_change_checksum_;
int maximum_environment_size_;
@@ -941,26 +940,26 @@ class FunctionState V8_FINAL {
class HIfContinuation V8_FINAL {
public:
- HIfContinuation() { continuation_captured_ = false; }
+ HIfContinuation() : continuation_captured_(false) {}
+ HIfContinuation(HBasicBlock* true_branch,
+ HBasicBlock* false_branch)
+ : continuation_captured_(true), true_branch_(true_branch),
+ false_branch_(false_branch) {}
~HIfContinuation() { ASSERT(!continuation_captured_); }
void Capture(HBasicBlock* true_branch,
- HBasicBlock* false_branch,
- int position) {
+ HBasicBlock* false_branch) {
ASSERT(!continuation_captured_);
true_branch_ = true_branch;
false_branch_ = false_branch;
- position_ = position;
continuation_captured_ = true;
}
void Continue(HBasicBlock** true_branch,
- HBasicBlock** false_branch,
- int* position) {
+ HBasicBlock** false_branch) {
ASSERT(continuation_captured_);
*true_branch = true_branch_;
*false_branch = false_branch_;
- if (position != NULL) *position = position_;
continuation_captured_ = false;
}
@@ -970,10 +969,13 @@ class HIfContinuation V8_FINAL {
return IsTrueReachable() || IsFalseReachable();
}
+ HBasicBlock* true_branch() const { return true_branch_; }
+ HBasicBlock* false_branch() const { return false_branch_; }
+
+ private:
bool continuation_captured_;
HBasicBlock* true_branch_;
HBasicBlock* false_branch_;
- int position_;
};
@@ -982,7 +984,8 @@ class HGraphBuilder {
explicit HGraphBuilder(CompilationInfo* info)
: info_(info),
graph_(NULL),
- current_block_(NULL) {}
+ current_block_(NULL),
+ position_(RelocInfo::kNoPosition) {}
virtual ~HGraphBuilder() {}
HBasicBlock* current_block() const { return current_block_; }
@@ -1005,6 +1008,34 @@ class HGraphBuilder {
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
+ void FinishCurrentBlock(HControlInstruction* last);
+ void FinishExitCurrentBlock(HControlInstruction* instruction);
+
+ void Goto(HBasicBlock* from,
+ HBasicBlock* target,
+ FunctionState* state = NULL,
+ bool add_simulate = true) {
+ from->Goto(target, position_, state, add_simulate);
+ }
+ void Goto(HBasicBlock* target,
+ FunctionState* state = NULL,
+ bool add_simulate = true) {
+ Goto(current_block(), target, state, add_simulate);
+ }
+ void GotoNoSimulate(HBasicBlock* from, HBasicBlock* target) {
+ Goto(from, target, NULL, false);
+ }
+ void GotoNoSimulate(HBasicBlock* target) {
+ Goto(target, NULL, false);
+ }
+ void AddLeaveInlined(HBasicBlock* block,
+ HValue* return_value,
+ FunctionState* state) {
+ block->AddLeaveInlined(return_value, state, position_);
+ }
+ void AddLeaveInlined(HValue* return_value, FunctionState* state) {
+ return AddLeaveInlined(current_block(), return_value, state);
+ }
template<class I>
HInstruction* NewUncasted() { return I::New(zone(), context()); }
@@ -1199,6 +1230,8 @@ class HGraphBuilder {
void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
+ int position() const { return position_; }
+
protected:
virtual bool BuildGraph() = 0;
@@ -1228,6 +1261,8 @@ class HGraphBuilder {
ElementsKind to_kind,
bool is_jsarray);
+ HValue* BuildNumberToString(HValue* object, Handle<Type> type);
+
HInstruction* BuildUncheckedMonomorphicElementAccess(
HValue* checked_object,
HValue* key,
@@ -1238,46 +1273,50 @@ class HGraphBuilder {
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode);
- HInstruction* AddExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
-
- HInstruction* AddFastElementAccess(
+ HInstruction* AddElementAccess(
HValue* elements,
HValue* checked_key,
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
bool is_store,
- LoadKeyedHoleMode load_mode,
- KeyedAccessStoreMode store_mode);
+ LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access);
+ HInstruction* AddLoadNamedField(HValue* object, HObjectAccess access);
HInstruction* BuildLoadStringLength(HValue* object, HValue* checked_value);
HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map>);
HLoadNamedField* AddLoadElements(HValue* object);
+
+ bool MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount);
+
+ HInstruction* BuildBinaryOperation(Token::Value op,
+ HValue* left,
+ HValue* right,
+ Handle<Type> left_type,
+ Handle<Type> right_type,
+ Handle<Type> result_type,
+ Maybe<int> fixed_right_arg,
+ bool binop_stub = false);
+
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin);
+ HValue* EnforceNumberType(HValue* number, Handle<Type> expected);
HValue* TruncateToNumber(HValue* value, Handle<Type>* expected);
- void PushAndAdd(HInstruction* instr);
-
void FinishExitWithHardDeoptimization(const char* reason,
HBasicBlock* continuation);
- void AddIncrementCounter(StatsCounter* counter,
- HValue* context);
+ void AddIncrementCounter(StatsCounter* counter);
class IfBuilder V8_FINAL {
public:
- explicit IfBuilder(HGraphBuilder* builder,
- int position = RelocInfo::kNoPosition);
+ explicit IfBuilder(HGraphBuilder* builder);
IfBuilder(HGraphBuilder* builder,
HIfContinuation* continuation);
@@ -1286,80 +1325,79 @@ class HGraphBuilder {
}
template<class Condition>
- HInstruction* If(HValue *p) {
- HControlInstruction* compare = new(zone()) Condition(p);
+ Condition* If(HValue *p) {
+ Condition* compare = builder()->New<Condition>(p);
AddCompare(compare);
return compare;
}
template<class Condition, class P2>
- HInstruction* If(HValue* p1, P2 p2) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2);
+ Condition* If(HValue* p1, P2 p2) {
+ Condition* compare = builder()->New<Condition>(p1, p2);
AddCompare(compare);
return compare;
}
template<class Condition, class P2, class P3>
- HInstruction* If(HValue* p1, P2 p2, P3 p3) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2, p3);
+ Condition* If(HValue* p1, P2 p2, P3 p3) {
+ Condition* compare = builder()->New<Condition>(p1, p2, p3);
AddCompare(compare);
return compare;
}
+ template<class Condition>
+ Condition* IfNot(HValue* p) {
+ Condition* compare = If<Condition>(p);
+ compare->Not();
+ return compare;
+ }
+
template<class Condition, class P2>
- HInstruction* IfNot(HValue* p1, P2 p2) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2);
- AddCompare(compare);
- HBasicBlock* block0 = compare->SuccessorAt(0);
- HBasicBlock* block1 = compare->SuccessorAt(1);
- compare->SetSuccessorAt(0, block1);
- compare->SetSuccessorAt(1, block0);
+ Condition* IfNot(HValue* p1, P2 p2) {
+ Condition* compare = If<Condition>(p1, p2);
+ compare->Not();
return compare;
}
template<class Condition, class P2, class P3>
- HInstruction* IfNot(HValue* p1, P2 p2, P3 p3) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2, p3);
- AddCompare(compare);
- HBasicBlock* block0 = compare->SuccessorAt(0);
- HBasicBlock* block1 = compare->SuccessorAt(1);
- compare->SetSuccessorAt(0, block1);
- compare->SetSuccessorAt(1, block0);
+ Condition* IfNot(HValue* p1, P2 p2, P3 p3) {
+ Condition* compare = If<Condition>(p1, p2, p3);
+ compare->Not();
return compare;
}
template<class Condition>
- HInstruction* OrIf(HValue *p) {
+ Condition* OrIf(HValue *p) {
Or();
return If<Condition>(p);
}
template<class Condition, class P2>
- HInstruction* OrIf(HValue* p1, P2 p2) {
+ Condition* OrIf(HValue* p1, P2 p2) {
Or();
return If<Condition>(p1, p2);
}
template<class Condition, class P2, class P3>
- HInstruction* OrIf(HValue* p1, P2 p2, P3 p3) {
+ Condition* OrIf(HValue* p1, P2 p2, P3 p3) {
Or();
return If<Condition>(p1, p2, p3);
}
template<class Condition>
- HInstruction* AndIf(HValue *p) {
+ Condition* AndIf(HValue *p) {
And();
return If<Condition>(p);
}
template<class Condition, class P2>
- HInstruction* AndIf(HValue* p1, P2 p2) {
+ Condition* AndIf(HValue* p1, P2 p2) {
And();
return If<Condition>(p1, p2);
}
template<class Condition, class P2, class P3>
- HInstruction* AndIf(HValue* p1, P2 p2, P3 p3) {
+ Condition* AndIf(HValue* p1, P2 p2, P3 p3) {
And();
return If<Condition>(p1, p2, p3);
}
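
Note that IfNot<> no longer hand-swaps successor blocks: it builds the branch through If<> and inverts it with the condition's Not() method, which the typed Condition* return value makes possible. A sketch of what Not() presumably does, based on the swap the removed code performed inline:

    void HControlInstruction::Not() {
      // Invert the branch by exchanging its true and false targets.
      HBasicBlock* swap = SuccessorAt(0);
      SetSuccessorAt(0, SuccessorAt(1));
      SetSuccessorAt(1, swap);
    }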
@@ -1367,8 +1405,50 @@ class HGraphBuilder {
void Or();
void And();
+ // Captures the current state of this IfBuilder in the specified
+ // continuation and ends this IfBuilder.
void CaptureContinuation(HIfContinuation* continuation);
+ // Joins the specified continuation from this IfBuilder and ends this
+ // IfBuilder. This appends a Goto instruction from the true branch of
+ // this IfBuilder to the true branch of the continuation unless the
+ // true branch of this IfBuilder is already finished. And vice versa
+ // for the false branch.
+ //
+  // The basic idea is as follows: You have several nested IfBuilders
+ // that you want to join based on two possible outcomes (i.e. success
+ // and failure, or whatever). You can do this easily using this method
+ // now, for example:
+ //
+ // HIfContinuation cont(graph()->CreateBasicBlock(),
+ // graph()->CreateBasicBlock());
+ // ...
+ // IfBuilder if_whatever(this);
+ // if_whatever.If<Condition>(arg);
+ // if_whatever.Then();
+ // ...
+ // if_whatever.Else();
+ // ...
+ // if_whatever.JoinContinuation(&cont);
+ // ...
+ // IfBuilder if_something(this);
+ // if_something.If<Condition>(arg1, arg2);
+ // if_something.Then();
+ // ...
+ // if_something.Else();
+ // ...
+ // if_something.JoinContinuation(&cont);
+ // ...
+ // IfBuilder if_finally(this, &cont);
+ // if_finally.Then();
+ // // continues after then code of if_whatever or if_something.
+ // ...
+ // if_finally.Else();
+ // // continues after else code of if_whatever or if_something.
+ // ...
+ // if_finally.End();
+ void JoinContinuation(HIfContinuation* continuation);
+
void Then();
void Else();
void End();
@@ -1382,12 +1462,11 @@ class HGraphBuilder {
void Return(HValue* value);
private:
- void AddCompare(HControlInstruction* compare);
+ HControlInstruction* AddCompare(HControlInstruction* compare);
- Zone* zone() { return builder_->zone(); }
+ HGraphBuilder* builder() const { return builder_; }
HGraphBuilder* builder_;
- int position_;
bool finished_ : 1;
bool deopt_then_ : 1;
bool deopt_else_ : 1;
@@ -1548,7 +1627,6 @@ class HGraphBuilder {
void BuildCompareNil(
HValue* value,
Handle<Type> type,
- int position,
HIfContinuation* continuation);
HValue* BuildCreateAllocationMemento(HValue* previous_object,
@@ -1563,6 +1641,12 @@ class HGraphBuilder {
HInstruction* BuildGetNativeContext();
HInstruction* BuildGetArrayFunction();
+ protected:
+ void SetSourcePosition(int position) {
+ ASSERT(position != RelocInfo::kNoPosition);
+ position_ = position;
+ }
+
private:
HGraphBuilder();
@@ -1572,6 +1656,7 @@ class HGraphBuilder {
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
+ int position_;
};
@@ -1583,13 +1668,14 @@ inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
if (FLAG_always_opt) return NULL;
}
if (current_block()->IsDeoptimizing()) return NULL;
- HDeoptimize* instr = New<HDeoptimize>(reason, type);
- AddInstruction(instr);
+ HBasicBlock* after_deopt_block = CreateBasicBlock(
+ current_block()->last_environment());
+ HDeoptimize* instr = New<HDeoptimize>(reason, type, after_deopt_block);
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_inserted()->Increment();
- graph()->set_has_soft_deoptimize(true);
}
- current_block()->MarkAsDeoptimizing();
+ FinishCurrentBlock(instr);
+ set_current_block(after_deopt_block);
return instr;
}
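
A soft deopt used to set a graph-wide has_soft_deoptimize flag and mark the block; it now terminates the current block with the HDeoptimize and resumes building in a fresh block, so the property can be derived per block from its final instruction. The resulting shape, as a sketch:

    // current_block:      ... HDeoptimize(reason, type)   // ends the block
    //                              |
    //                              v
    // after_deopt_block:  ...                             // building resumes here
    //
    // IsDeoptimizing() on a finished block now simply checks
    // end()->IsDeoptimize() instead of a sticky bit (see hydrogen.h above).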
@@ -1622,7 +1708,7 @@ inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HValue* value) {
int num_parameters = graph()->info()->num_parameters();
HValue* params = AddUncasted<HConstant>(num_parameters);
HReturn* return_instruction = New<HReturn>(value, params);
- current_block()->FinishExit(return_instruction);
+ FinishExitCurrentBlock(return_instruction);
return return_instruction;
}
@@ -1634,13 +1720,29 @@ inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HConstant* value) {
template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HCallRuntime>(
+ Handle<String> name,
+ const Runtime::Function* c_function,
+ int argument_count) {
+ HCallRuntime* instr = New<HCallRuntime>(name, c_function, argument_count);
+ if (graph()->info()->IsStub()) {
+ // When compiling code stubs, we don't want to save all double registers
+ // upon entry to the stub, but instead have the call runtime instruction
+ // save the double registers only on-demand (in the fallback case).
+ instr->set_save_doubles(kSaveFPRegs);
+ }
+ AddInstruction(instr);
+ return instr;
+}
+
+
+template<>
inline HInstruction* HGraphBuilder::NewUncasted<HContext>() {
return HContext::New(zone());
}
-class HOptimizedGraphBuilder V8_FINAL
- : public HGraphBuilder, public AstVisitor {
+class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
public:
// A class encapsulating (lazily-allocated) break and continue blocks for
// a breakable statement. Separated from BreakAndContinueScope so that it
@@ -1707,6 +1809,8 @@ class HOptimizedGraphBuilder V8_FINAL
HValue* context() { return environment()->context(); }
+ HOsrBuilder* osr() const { return osr_; }
+
void Bailout(BailoutReason reason);
HBasicBlock* CreateJoin(HBasicBlock* first,
@@ -1725,7 +1829,7 @@ class HOptimizedGraphBuilder V8_FINAL
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
- private:
+ protected:
// Type of a member function that generates inline code for a native function.
typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
(CallRuntime* call);
@@ -1812,6 +1916,12 @@ class HOptimizedGraphBuilder V8_FINAL
HBasicBlock* loop_successor,
HBasicBlock* break_block);
+  // Builds a loop entry.
+  HBasicBlock* BuildLoopEntry();
+
+  // Builds a loop entry respectful of OSR requirements.
+  HBasicBlock* BuildLoopEntry(IterationStatement* statement);
+
HBasicBlock* JoinContinue(IterationStatement* statement,
HBasicBlock* exit_block,
HBasicBlock* continue_block);
@@ -1837,21 +1947,22 @@ class HOptimizedGraphBuilder V8_FINAL
env->Bind(index, value);
if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
HEnvironmentMarker* bind =
- new(zone()) HEnvironmentMarker(HEnvironmentMarker::BIND, index);
- AddInstruction(bind);
+ Add<HEnvironmentMarker>(HEnvironmentMarker::BIND, index);
+ USE(bind);
#ifdef DEBUG
bind->set_closure(env->closure());
#endif
}
}
+
HValue* LookupAndMakeLive(Variable* var) {
HEnvironment* env = environment();
int index = env->IndexFor(var);
HValue* value = env->Lookup(index);
if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
HEnvironmentMarker* lookup =
- new(zone()) HEnvironmentMarker(HEnvironmentMarker::LOOKUP, index);
- AddInstruction(lookup);
+ Add<HEnvironmentMarker>(HEnvironmentMarker::LOOKUP, index);
+ USE(lookup);
#ifdef DEBUG
lookup->set_closure(env->closure());
#endif
@@ -1889,6 +2000,7 @@ class HOptimizedGraphBuilder V8_FINAL
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+ private:
// Helpers for flow graph construction.
enum GlobalPropertyAccess {
kUseCell,
@@ -1940,27 +2052,113 @@ class HOptimizedGraphBuilder V8_FINAL
void HandleGlobalVariableAssignment(Variable* var,
HValue* value,
- int position,
BailoutId ast_id);
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicLoadNamedField(int position,
+ void HandlePolymorphicLoadNamedField(BailoutId ast_id,
BailoutId return_id,
HValue* object,
SmallMapList* types,
Handle<String> name);
- HInstruction* TryLoadPolymorphicAsMonomorphic(HValue* object,
- SmallMapList* types,
- Handle<String> name);
- void HandlePolymorphicStoreNamedField(int position,
- BailoutId assignment_id,
+
+ class PropertyAccessInfo {
+ public:
+ PropertyAccessInfo(Isolate* isolate, Handle<Map> map, Handle<String> name)
+ : lookup_(isolate),
+ map_(map),
+ name_(name),
+ access_(HObjectAccess::ForMap()) { }
+
+  // Checks whether this PropertyAccessInfo can be handled as a monomorphic
+ // load named. It additionally fills in the fields necessary to generate the
+ // lookup code.
+ bool CanLoadMonomorphic();
+
+ // Checks whether all types behave uniform when loading name. If all maps
+ // behave the same, a single monomorphic load instruction can be emitted,
+ // guarded by a single map-checks instruction that whether the receiver is
+ // an instance of any of the types.
+ // This method skips the first type in types, assuming that this
+ // PropertyAccessInfo is built for types->first().
+ bool CanLoadAsMonomorphic(SmallMapList* types);
+
+ bool IsJSObjectFieldAccessor() {
+ int offset; // unused
+ return Accessors::IsJSObjectFieldAccessor(map_, name_, &offset);
+ }
+
+ bool GetJSObjectFieldAccess(HObjectAccess* access) {
+ if (IsStringLength()) {
+ *access = HObjectAccess::ForStringLength();
+ return true;
+ } else if (IsArrayLength()) {
+ *access = HObjectAccess::ForArrayLength(map_->elements_kind());
+ return true;
+ } else {
+ int offset;
+ if (Accessors::IsJSObjectFieldAccessor(map_, name_, &offset)) {
+ *access = HObjectAccess::ForJSObjectOffset(offset);
+ return true;
+ }
+ return false;
+ }
+ }
+
+ bool has_holder() { return !holder_.is_null(); }
+
+ LookupResult* lookup() { return &lookup_; }
+ Handle<Map> map() { return map_; }
+ Handle<JSObject> holder() { return holder_; }
+ Handle<JSFunction> accessor() { return accessor_; }
+ Handle<Object> constant() { return constant_; }
+ HObjectAccess access() { return access_; }
+
+ private:
+ Isolate* isolate() { return lookup_.isolate(); }
+
+ bool IsStringLength() {
+ return map_->instance_type() < FIRST_NONSTRING_TYPE &&
+ name_->Equals(isolate()->heap()->length_string());
+ }
+
+ bool IsArrayLength() {
+ return map_->instance_type() == JS_ARRAY_TYPE &&
+ name_->Equals(isolate()->heap()->length_string());
+ }
+
+ bool LoadResult(Handle<Map> map);
+ bool LookupDescriptor();
+ bool LookupInPrototypes();
+ bool IsCompatibleForLoad(PropertyAccessInfo* other);
+
+ void GeneralizeRepresentation(Representation r) {
+ access_ = access_.WithRepresentation(
+ access_.representation().generalize(r));
+ }
+
+ LookupResult lookup_;
+ Handle<Map> map_;
+ Handle<String> name_;
+ Handle<JSObject> holder_;
+ Handle<JSFunction> accessor_;
+ Handle<Object> constant_;
+ HObjectAccess access_;
+ };
+
+ HInstruction* BuildLoadMonomorphic(PropertyAccessInfo* info,
+ HValue* object,
+ HInstruction* checked_object,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool can_inline_accessor = true);
+
+ void HandlePolymorphicStoreNamedField(BailoutId assignment_id,
HValue* object,
HValue* value,
SmallMapList* types,
Handle<String> name);
- bool TryStorePolymorphicAsMonomorphic(int position,
- BailoutId assignment_id,
+ bool TryStorePolymorphicAsMonomorphic(BailoutId assignment_id,
HValue* object,
HValue* value,
SmallMapList* types,
@@ -2009,8 +2207,6 @@ class HOptimizedGraphBuilder V8_FINAL
HValue* key,
HValue* val,
SmallMapList* maps,
- BailoutId ast_id,
- int position,
bool is_store,
KeyedAccessStoreMode store_mode,
bool* has_side_effects);
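
The PropertyAccessInfo class above, together with BuildLoadMonomorphic, replaces the TryLoadPolymorphicAsMonomorphic path removed in this diff. A usage sketch assembled only from the declarations shown here; the surrounding control flow is illustrative:

    PropertyAccessInfo info(isolate(), types->first(), name);
    if (info.CanLoadAsMonomorphic(types)) {
      // One map-checks instruction guards a single load for every map in types.
      HInstruction* checked_object = Add<HCheckMaps>(object, types);
      HInstruction* load = BuildLoadMonomorphic(
          &info, object, checked_object, ast_id, return_id);
      return ast_context()->ReturnInstruction(load, ast_id);
    }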
@@ -2019,31 +2215,20 @@ class HOptimizedGraphBuilder V8_FINAL
HValue* key,
HValue* val,
Expression* expr,
- BailoutId ast_id,
- int position,
bool is_store,
bool* has_side_effects);
HInstruction* BuildLoadNamedGeneric(HValue* object,
Handle<String> name,
Property* expr);
- HInstruction* BuildCallGetter(HValue* object,
- Handle<Map> map,
- Handle<JSFunction> getter,
- Handle<JSObject> holder);
- HInstruction* BuildLoadNamedMonomorphic(HValue* object,
- Handle<String> name,
- Handle<Map> map);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
void BuildLoad(Property* property,
- int position,
BailoutId ast_id);
void PushLoad(Property* property,
HValue* object,
- HValue* key,
- int position);
+ HValue* key);
void BuildStoreForEffect(Expression* expression,
Property* prop,
@@ -2080,8 +2265,7 @@ class HOptimizedGraphBuilder V8_FINAL
HInstruction* BuildThisFunction();
HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object,
- Handle<Object> allocation_site,
- AllocationSiteMode mode);
+ AllocationSiteContext* site_context);
void BuildEmitObjectHeader(Handle<JSObject> boilerplate_object,
HInstruction* object);
@@ -2091,11 +2275,13 @@ class HOptimizedGraphBuilder V8_FINAL
HInstruction* object_elements);
void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
- HInstruction* object);
+ HInstruction* object,
+ AllocationSiteContext* site_context);
void BuildEmitElements(Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
- HValue* object_elements);
+ HValue* object_elements,
+ AllocationSiteContext* site_context);
void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
ElementsKind kind,
@@ -2103,7 +2289,8 @@ class HOptimizedGraphBuilder V8_FINAL
void BuildEmitFixedArray(Handle<FixedArrayBase> elements,
ElementsKind kind,
- HValue* object_elements);
+ HValue* object_elements,
+ AllocationSiteContext* site_context);
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
@@ -2112,11 +2299,6 @@ class HOptimizedGraphBuilder V8_FINAL
HValue* receiver,
Handle<Map> receiver_map);
- bool MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount);
-
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 0ae19c8232..dbff6e5f52 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -464,7 +464,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
Handle<String> key = isolate->factory()->NewStringFromAscii(
CStrVector("minimumSignificantDigits"));
- if (resolved->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(resolved, key)) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(
@@ -477,7 +477,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
key = isolate->factory()->NewStringFromAscii(
CStrVector("maximumSignificantDigits"));
- if (resolved->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(resolved, key)) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(
@@ -855,7 +855,7 @@ icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("dateFormat"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::SimpleDateFormat*>(
obj->GetInternalField(0));
}
@@ -920,7 +920,7 @@ icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("numberFormat"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
}
@@ -981,7 +981,7 @@ icu::Collator* Collator::UnpackCollator(Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("collator"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
}
@@ -1045,7 +1045,7 @@ icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("breakIterator"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
}
diff --git a/deps/v8/src/i18n.js b/deps/v8/src/i18n.js
index 1798bbba7a..a64c7e6784 100644
--- a/deps/v8/src/i18n.js
+++ b/deps/v8/src/i18n.js
@@ -258,8 +258,8 @@ function addBoundMethod(obj, methodName, implementation, length) {
    // DateTimeFormat.format needs to be a 0-arg method, but it can still
    // receive an optional dateValue param. If one was provided, pass it
// along.
- if (arguments.length > 0) {
- return implementation(that, arguments[0]);
+ if (%_ArgumentsLength() > 0) {
+ return implementation(that, %_Arguments(0));
} else {
return implementation(that);
}
@@ -290,7 +290,7 @@ function addBoundMethod(obj, methodName, implementation, length) {
* Parameter locales is treated as a priority list.
*/
function supportedLocalesOf(service, locales, options) {
- if (service.match(GetServiceRE()) === null) {
+ if (IS_NULL(service.match(GetServiceRE()))) {
throw new $Error('Internal error, wrong service type: ' + service);
}
@@ -447,7 +447,7 @@ function resolveLocale(service, requestedLocales, options) {
* lookup algorithm.
*/
function lookupMatcher(service, requestedLocales) {
- if (service.match(GetServiceRE()) === null) {
+ if (IS_NULL(service.match(GetServiceRE()))) {
throw new $Error('Internal error, wrong service type: ' + service);
}
@@ -463,7 +463,7 @@ function lookupMatcher(service, requestedLocales) {
if (AVAILABLE_LOCALES[service][locale] !== undefined) {
// Return the resolved locale and extension.
var extensionMatch = requestedLocales[i].match(GetUnicodeExtensionRE());
- var extension = (extensionMatch === null) ? '' : extensionMatch[0];
+ var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
return {'locale': locale, 'extension': extension, 'position': i};
}
// Truncate locale if possible.
@@ -535,7 +535,7 @@ function parseExtension(extension) {
* Converts parameter to an Object if possible.
*/
function toObject(value) {
- if (value === undefined || value === null) {
+ if (IS_NULL_OR_UNDEFINED(value)) {
throw new $TypeError('Value cannot be converted to an Object.');
}
@@ -733,7 +733,7 @@ function toTitleCaseWord(word) {
function canonicalizeLanguageTag(localeID) {
   // null is typeof 'object' so we have to do an extra check.
if (typeof localeID !== 'string' && typeof localeID !== 'object' ||
- localeID === null) {
+ IS_NULL(localeID)) {
throw new $TypeError('Language ID should be string or object.');
}
@@ -978,8 +978,8 @@ function initializeCollator(collator, locales, options) {
* @constructor
*/
%SetProperty(Intl, 'Collator', function() {
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1038,7 +1038,7 @@ function initializeCollator(collator, locales, options) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- return supportedLocalesOf('collator', locales, arguments[1]);
+ return supportedLocalesOf('collator', locales, %_Arguments(1));
},
DONT_ENUM
);
@@ -1207,8 +1207,8 @@ function initializeNumberFormat(numberFormat, locales, options) {
* @constructor
*/
%SetProperty(Intl, 'NumberFormat', function() {
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1286,7 +1286,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- return supportedLocalesOf('numberformat', locales, arguments[1]);
+ return supportedLocalesOf('numberformat', locales, %_Arguments(1));
},
DONT_ENUM
);
@@ -1367,7 +1367,7 @@ function toLDMLString(options) {
ldmlString += appendToLDMLString(option, {'2-digit': 'ss', 'numeric': 's'});
option = getOption('timeZoneName', 'string', ['short', 'long']);
- ldmlString += appendToLDMLString(option, {short: 'v', long: 'vv'});
+ ldmlString += appendToLDMLString(option, {short: 'z', long: 'zzzz'});
return ldmlString;
}
@@ -1440,16 +1440,16 @@ function fromLDMLString(ldmlString) {
options = appendToDateTimeObject(
options, 'second', match, {s: 'numeric', ss: '2-digit'});
- match = ldmlString.match(/v{1,2}/g);
+ match = ldmlString.match(/z|zzzz/g);
options = appendToDateTimeObject(
- options, 'timeZoneName', match, {v: 'short', vv: 'long'});
+ options, 'timeZoneName', match, {z: 'short', zzzz: 'long'});
return options;
}
function appendToDateTimeObject(options, option, match, pairs) {
- if (match === null) {
+ if (IS_NULL(match)) {
if (!options.hasOwnProperty(option)) {
defineWEProperty(options, option, undefined);
}
@@ -1606,8 +1606,8 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
* @constructor
*/
%SetProperty(Intl, 'DateTimeFormat', function() {
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1685,7 +1685,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- return supportedLocalesOf('dateformat', locales, arguments[1]);
+ return supportedLocalesOf('dateformat', locales, %_Arguments(1));
},
DONT_ENUM
);
@@ -1751,7 +1751,7 @@ function canonicalizeTimeZoneID(tzID) {
   // We expect only _ and / besides ASCII letters.
// All inputs should conform to Area/Location from now on.
var match = GetTimezoneNameCheckRE().exec(tzID);
- if (match === null) {
+ if (IS_NULL(match)) {
throw new $RangeError('Expected Area/Location for time zone, got ' + tzID);
}
@@ -1812,8 +1812,8 @@ function initializeBreakIterator(iterator, locales, options) {
* @constructor
*/
%SetProperty(Intl, 'v8BreakIterator', function() {
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1868,7 +1868,7 @@ function initializeBreakIterator(iterator, locales, options) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- return supportedLocalesOf('breakiterator', locales, arguments[1]);
+ return supportedLocalesOf('breakiterator', locales, %_Arguments(1));
},
DONT_ENUM
);
@@ -1971,12 +1971,12 @@ $Object.defineProperty($String.prototype, 'localeCompare', {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- if (this === undefined || this === null) {
+ if (IS_NULL_OR_UNDEFINED(this)) {
throw new $TypeError('Method invoked on undefined or null value.');
}
- var locales = arguments[1];
- var options = arguments[2];
+ var locales = %_Arguments(1);
+ var options = %_Arguments(2);
var collator = cachedOrNewService('collator', locales, options);
return compare(collator, this, that);
},
@@ -2003,8 +2003,8 @@ $Object.defineProperty($Number.prototype, 'toLocaleString', {
throw new $TypeError('Method invoked on an object that is not Number.');
}
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
var numberFormat = cachedOrNewService('numberformat', locales, options);
return formatNumber(numberFormat, this);
},
@@ -2049,8 +2049,8 @@ $Object.defineProperty($Date.prototype, 'toLocaleString', {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
return toLocaleDateTime(
this, locales, options, 'any', 'all', 'dateformatall');
},
@@ -2074,8 +2074,8 @@ $Object.defineProperty($Date.prototype, 'toLocaleDateString', {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
return toLocaleDateTime(
this, locales, options, 'date', 'date', 'dateformatdate');
},
@@ -2099,8 +2099,8 @@ $Object.defineProperty($Date.prototype, 'toLocaleTimeString', {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
return toLocaleDateTime(
this, locales, options, 'time', 'time', 'dateformattime');
},
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 5a35b207f7..05cc23a71d 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -47,6 +47,7 @@ namespace internal {
static const byte kCallOpcode = 0xE8;
+static const int kNoCodeAgeSequenceLength = 5;
// The modes possibly affected by apply must be in kApplyMask.
@@ -190,6 +191,13 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
}
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Memory::Object_Handle_at(pc_ + 1);
+}
+
+
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
@@ -379,7 +387,8 @@ void Assembler::emit(Handle<Object> handle) {
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
- } else if (!RelocInfo::IsNone(rmode)) {
+ } else if (!RelocInfo::IsNone(rmode)
+ && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
RecordRelocInfo(rmode);
}
emit(x);
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index e5456da474..0557ed8853 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -53,6 +53,7 @@ bool CpuFeatures::initialized_ = false;
#endif
uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
+uint64_t CpuFeatures::cross_compile_ = 0;
ExternalReference ExternalReference::cpu_features() {
@@ -1131,30 +1132,21 @@ void Assembler::sub(const Operand& dst, Register src) {
void Assembler::test(Register reg, const Immediate& imm) {
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ test_b(reg, imm.x_);
+ return;
+ }
+
EnsureSpace ensure_space(this);
- // Only use test against byte for registers that have a byte
- // variant: eax, ebx, ecx, and edx.
- if (RelocInfo::IsNone(imm.rmode_) &&
- is_uint8(imm.x_) &&
- reg.is_byte_register()) {
- uint8_t imm8 = imm.x_;
- if (reg.is(eax)) {
- EMIT(0xA8);
- EMIT(imm8);
- } else {
- emit_arith_b(0xF6, 0xC0, reg, imm8);
- }
+ // This is not using emit_arith because test doesn't support
+ // sign-extension of 8-bit operands.
+ if (reg.is(eax)) {
+ EMIT(0xA9);
} else {
- // This is not using emit_arith because test doesn't support
- // sign-extension of 8-bit operands.
- if (reg.is(eax)) {
- EMIT(0xA9);
- } else {
- EMIT(0xF7);
- EMIT(0xC0 | reg.code());
- }
- emit(imm);
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
}
+ emit(imm);
}
@@ -1178,6 +1170,9 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
test(op.reg(), imm);
return;
}
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ return test_b(op, imm.x_);
+ }
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(eax, op);
@@ -1185,9 +1180,26 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
}
+void Assembler::test_b(Register reg, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ // Only use test against byte for registers that have a byte
+ // variant: eax, ebx, ecx, and edx.
+ if (reg.is(eax)) {
+ EMIT(0xA8);
+ EMIT(imm8);
+ } else if (reg.is_byte_register()) {
+ emit_arith_b(0xF6, 0xC0, reg, imm8);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ emit(imm8);
+ }
+}
+
+
void Assembler::test_b(const Operand& op, uint8_t imm8) {
- if (op.is_reg_only() && !op.reg().is_byte_register()) {
- test(op, Immediate(imm8));
+ if (op.is_reg_only()) {
+ test_b(op.reg(), imm8);
return;
}
EnsureSpace ensure_space(this);
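
The practical win is in the Operand form just above: a byte-sized immediate tested against memory now emits the short F6 /0 ib encoding instead of F7 /0 with a full 32-bit immediate. A stand-alone sketch of the predicate gating it (V8's real is_uint8 helper lives in utils.h):

    #include <stdint.h>
    // Sketch of the guard: true exactly for immediates 0..255.
    static inline bool is_uint8(int64_t x) { return (x & ~INT64_C(0xFF)) == 0; }
    // test dword [mem], 0x10  ->  F7 /0 10 00 00 00   (32-bit immediate)
    // test byte  [mem], 0x10  ->  F6 /0 10            (byte immediate)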
@@ -1402,7 +1414,8 @@ void Assembler::call(Handle<Code> code,
TypeFeedbackId ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode)
+ || rmode == RelocInfo::CODE_AGE_SEQUENCE);
EMIT(0xE8);
emit(code, rmode, ast_id);
}
@@ -2055,6 +2068,7 @@ void Assembler::xorps(XMMRegister dst, XMMRegister src) {
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2064,6 +2078,7 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2073,6 +2088,7 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2235,18 +2251,6 @@ void Assembler::prefetch(const Operand& src, int level) {
}
-void Assembler::movdbl(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
-void Assembler::movdbl(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2335,11 +2339,19 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
EMIT(0x0F);
EMIT(0x3A);
EMIT(0x17);
- emit_sse_operand(dst, src);
+ emit_sse_operand(src, dst);
EMIT(imm8);
}
+void Assembler::andps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2474,6 +2486,11 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
}
+void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
+ EMIT(0xC0 | (dst.code() << 3) | src.code());
+}
+
+
void Assembler::Print() {
Disassembler::Decode(isolate(), stdout, buffer_, pc_);
}
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 55eff93190..f46c6478db 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -535,32 +535,54 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
+ if (Check(f, cross_compile_)) return true;
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, supported_);
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- return (found_by_runtime_probing_only_ &
- (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
static bool IsSafeForSnapshot(CpuFeature f) {
- return (IsSupported(f) &&
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ uint64_t mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
+
private:
+ static bool Check(CpuFeature f, uint64_t set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static uint64_t flag2set(CpuFeature f) {
+ return static_cast<uint64_t>(1) << f;
+ }
+
#ifdef DEBUG
static bool initialized_;
#endif
static uint64_t supported_;
static uint64_t found_by_runtime_probing_only_;
+ static uint64_t cross_compile_;
+
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
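
The new cross_compile_ mask lets a cross-compiling host assert features of the target CPU, and the Check/flag2set helpers reduce every feature query to a single mask test. A self-contained sketch of the arithmetic; the enum values are illustrative:

    #include <assert.h>
    #include <stdint.h>
    enum CpuFeature { SSE2 = 0, SSE3 = 1, SSE4_1 = 2 };  // illustrative numbering
    static uint64_t flag2set(CpuFeature f) { return (uint64_t)1 << f; }
    static bool Check(CpuFeature f, uint64_t set) { return (set & flag2set(f)) != 0; }
    int main() {
      uint64_t cross_compile = flag2set(SSE2);  // forced SSE2 target
      assert(Check(SSE2, cross_compile));       // IsSupported(SSE2) short-circuits
      assert(!Check(SSE4_1, cross_compile));    // SSE4_1 is still runtime-probed
      return 0;
    }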
@@ -852,7 +874,7 @@ class Assembler : public AssemblerBase {
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
- void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
+ void test_b(Register reg, uint8_t imm8);
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
@@ -995,6 +1017,10 @@ class Assembler : public AssemblerBase {
void cpuid();
+ // SSE instructions
+ void andps(XMMRegister dst, XMMRegister src);
+ void xorps(XMMRegister dst, XMMRegister src);
+
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
@@ -1012,7 +1038,6 @@ class Assembler : public AssemblerBase {
void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
@@ -1050,15 +1075,14 @@ class Assembler : public AssemblerBase {
}
}
- // Use either movsd or movlpd.
- void movdbl(XMMRegister dst, const Operand& src);
- void movdbl(const Operand& dst, XMMRegister src);
-
void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
void movd(const Operand& dst, XMMRegister src);
void movsd(XMMRegister dst, XMMRegister src);
+ void movsd(XMMRegister dst, const Operand& src);
+ void movsd(const Operand& dst, XMMRegister src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
@@ -1136,16 +1160,14 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
- byte byte_at(int pos) { return buffer_[pos]; }
+ byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected:
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
-
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(Register dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister dst, Register src);
byte* addr_at(int pos) { return buffer_ + pos; }
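With the movdbl alias deleted and movsd promoted from the protected section to the public interface, call sites change purely mechanically, which accounts for most of the one-line edits in the rest of this patch:

    // Before (alias, forwarded to movsd internally):
    //   __ movdbl(Operand(esp, 0), xmm1);
    // After (the SSE2 instruction called by its real name):
    //   __ movsd(Operand(esp, 0), xmm1);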
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index a1597481aa..e5e6ec50d1 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -539,10 +539,12 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 8 * kPointerSize));
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1, ebx);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0), eax);
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
}
__ popad();
__ ret(0);
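GenerateMakeCodeYoungAgainCommon now passes the isolate as an explicit second argument. With ia32 cdecl conventions, PrepareCallCFunction(2, ebx) reserves two word-sized outgoing slots; the stub stores the isolate at Operand(esp, 1 * kPointerSize) and the code address, still in eax, at Operand(esp, 0). A hedged sketch of the host-side signature this targets (shape inferred from the calling sequence, not copied from the runtime):

    // extern "C" void MakeCodeYoung(uint8_t* return_address_in_code,
    //                               Isolate* isolate);
    //
    // Outgoing frame built by the stub:
    //   [esp + 0]             first argument  (code address, from eax)
    //   [esp + kPointerSize]  second argument (isolate)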
@@ -561,6 +563,44 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection, which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+ __ pushad();
+ __ mov(eax, Operand(esp, 8 * kPointerSize));
+ __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ }
+ __ popad();
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ pop(eax); // Pop return address into scratch register.
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+ __ push(eax); // Push return address after frame prologue.
+
+ // Jump to point after the code-age stub.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
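Generate_MarkCodeAsExecutedOnce ends by re-executing, by hand, the frame prologue that the young-code entry sequence would normally have run, so that returning from the builtin resumes the function as if it had been entered normally; Generate_MarkCodeAsExecutedTwice simply reuses GenerateMakeCodeYoungAgainCommon. A sketch of the frame built by the pop and pushes, under the usual ia32 JS-frame conventions:

    // After pop eax / push ebp / mov ebp, esp / push esi / push edi / push eax:
    //   [esp + 0]   return address   (re-pushed, consumed by the ret below)
    //   [esp + 4]   edi = callee's JS function
    //   [esp + 8]   esi = callee's context
    //   [esp + 12]  saved caller ebp (ebp points here)
    // After ret(0): ebp - 4 holds the context and ebp - 8 the function,
    // the standard layout the function body expects.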
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
// Enter an internal frame.
{
@@ -628,25 +668,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // TODO(kasperl): Do we need to save/restore the XMM registers too?
- // TODO(mvstanton): We should save these regs, do this in a future
- // checkin.
-
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ popad();
- __ ret(0);
-}
-
-
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Factory* factory = masm->isolate()->factory();
@@ -1063,13 +1084,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- eax, // Input.
- ebx, // Result.
- ecx, // Scratch 1.
- edx, // Scratch 2.
- &not_cached);
+ __ LookupNumberStringCache(eax, // Input.
+ ebx, // Result.
+ ecx, // Scratch 1.
+ edx, // Scratch 2.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
@@ -1326,6 +1345,24 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as an indicator that recompilation might be done.
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ ret(0);
+}
+
#undef __
}
} // namespace v8::internal
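The new Generate_OsrAfterStackCheck builtin takes the slow path only when the stack pointer has been lowered past the interrupt limit, V8's signal that something needs attention (here, that OSR compilation may have finished); otherwise it returns immediately. A minimal C++ model of the control flow (illustrative, not a V8 API):

    #include <cstdint>

    // Models: __ cmp(esp, stack_limit); __ j(above_equal, &ok).
    static bool StackGuardTriggered(uintptr_t esp, uintptr_t stack_limit) {
      return esp < stack_limit;
    }

    static void OsrAfterStackCheck(uintptr_t esp, uintptr_t stack_limit) {
      if (!StackGuardTriggered(esp, stack_limit)) {
        return;  // __ bind(&ok); __ ret(0);
      }
      // Otherwise Runtime::kStackGuard runs, then control tail-jumps to
      // the OnStackReplacement builtin.
    }

    int main() {
      OsrAfterStackCheck(0x2000, 0x1000);  // above the limit: fast return
      return 0;
    }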
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index a83c1ae91d..b6bbe04b33 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -64,6 +64,17 @@ void ToNumberStub::InitializeInterfaceDescriptor(
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
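NumberToStringStub joins the descriptor-driven stubs: rather than hand-written assembly plus the per-platform cache-lookup helper (removed near the end of this file), it only declares where its argument arrives and which runtime function backs it, and shared code generates the body. A simplified model of the fields being filled in (stand-in types, not the real CodeStubInterfaceDescriptor):

    struct DescriptorModel {
      int register_param_count;            // number of register arguments
      const char* const* register_params;  // which registers carry them
      const char* deopt_handler;           // runtime fallback on deoptimization
    };

    static const char* const kNumberToStringParams[] = {"eax"};
    static const DescriptorModel kNumberToStringDescriptor = {
        1, kNumberToStringParams, "Runtime::kNumberToString"};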
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -82,7 +93,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -162,7 +173,7 @@ static void InitializeArrayConstructorDescriptor(
if (constant_stack_parameter_count != 0) {
// The stack parameter count needs the constructor pointer and a single argument.
- descriptor->stack_parameter_count_ = &eax;
+ descriptor->stack_parameter_count_ = eax;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
@@ -184,7 +195,7 @@ static void InitializeInternalArrayConstructorDescriptor(
if (constant_stack_parameter_count != 0) {
// The stack parameter count needs the constructor pointer and a single argument.
- descriptor->stack_parameter_count_ = &eax;
+ descriptor->stack_parameter_count_ = eax;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
@@ -283,6 +294,18 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
#define __ ACCESS_MASM(masm)
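The BinaryOpStub descriptor above is the visible tip of this patch's largest deletion: the stub is now generated from a shared, descriptor-driven implementation, with its operands pinned to edx (left) and eax (right) and misses routed to IC::kBinaryOpIC_Miss, so the roughly 1,250 lines of hand-written ia32 smi, float, and string paths below simply disappear. A toy C++ model of the miss-and-patch scheme (illustrative only, not V8 code):

    #include <cstdio>

    using Handler = int (*)(int, int);

    static int AddSmis(int left, int right) { return left + right; }     // fast path
    static int AddGeneric(int left, int right) { return left + right; }  // stand-in

    // A call site holds a handler specialized for the operand types seen
    // so far; a miss swaps in a more general handler and retries.
    struct CallSite {
      Handler handler = AddSmis;
      int Invoke(int left, int right, bool types_still_match) {
        if (!types_still_match) handler = AddGeneric;  // BinaryOpIC_Miss analogue
        return handler(left, right);
      }
    };

    int main() {
      CallSite site;
      printf("%d\n", site.Invoke(1, 2, true));   // 3, specialized handler
      printf("%d\n", site.Invoke(1, 2, false));  // 3, after the miss path
      return 0;
    }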
@@ -432,7 +455,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(Operand(esp, i * kDoubleSize), reg);
+ __ movsd(Operand(esp, i * kDoubleSize), reg);
}
}
const int argument_count = 1;
@@ -448,7 +471,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
CpuFeatureScope scope(masm, SSE2);
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(reg, Operand(esp, i * kDoubleSize));
+ __ movsd(reg, Operand(esp, i * kDoubleSize));
}
__ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
}
@@ -470,18 +493,6 @@ class FloatingPointHelper : public AllStatic {
// on FPU stack.
static void LoadFloatOperand(MacroAssembler* masm, Register number);
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
@@ -489,32 +500,11 @@ class FloatingPointHelper : public AllStatic {
Label* non_float,
Register scratch);
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* operand_conversion_failure);
-
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-
- // Checks that |operand| has an int32 value. If |int32_result| is different
- // from |scratch|, it will contain that int32 value.
- static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch);
};
@@ -658,1259 +648,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- __ push(edx);
- __ push(eax);
- // Left and right arguments are now on top.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- // Left and right arguments are already on top of the stack.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateRegisterArgsPop(MacroAssembler* masm) {
- __ pop(ecx);
- __ pop(eax);
- __ pop(edx);
- __ push(ecx);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op == Token::DIV || op == Token::MOD) {
- left = eax;
- right = ebx;
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
-
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, left);
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left);
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ JumpIfNotSmi(combined, &not_smis);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, left); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, left); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
-    //  - 0x40000000: this number would convert to negative when
-    //    Smi tagging. These two cases can only happen with shifts
-    //    by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, left); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis);
- break;
-
- case Token::SUB:
- __ sub(left, right);
- __ j(overflow, &use_fp_on_smis);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, left); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &use_fp_on_smis);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, edx);
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &not_smis);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- __ ret(0);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- __ ret(2 * kPointerSize);
- break;
- default:
- UNREACHABLE();
- }
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
- __ bind(&use_fp_on_smis);
- switch (op) {
- // Undo the effects of some operations, and some register moves.
- case Token::SHL:
- // The arguments are saved on the stack, and only used from there.
- break;
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division. They should be in eax, ebx for jump to not_smi.
- __ mov(eax, edi);
- break;
- default:
- // No other operators jump to use_fp_on_smis.
- break;
- }
- __ jmp(&not_smis);
- } else {
- ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
- switch (op) {
- case Token::SHL:
- case Token::SHR: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- // It's OK to overwrite the arguments on the stack because we
- // are about to return.
- if (op == Token::SHR) {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
- __ fild_d(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- } else {
- ASSERT_EQ(Token::SHL, op);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, left);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- }
- __ ret(2 * kPointerSize);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- __ ret(0);
- break;
- }
-
- default:
- break;
- }
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
- __ j(not_equal, &right_arg_changed);
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-// Input:
-// edx: left operand (tagged)
-// eax: right operand (tagged)
-// Output:
-// eax: result (tagged)
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- Label not_floats, not_int32, right_arg_changed;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_int32);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_int32);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ebx, ecx, xmm2);
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm1, edi, ecx, xmm2);
- if (op_ == Token::MOD) {
- if (encoded_right_arg_.has_value) {
- __ cmp(edi, Immediate(fixed_right_arg_value()));
- __ j(not_equal, &right_arg_changed);
- }
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Check result type if it is currently Int32.
- if (result_type_ <= BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ecx, ecx, xmm2);
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- }
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- if (op_ == Token::MOD) {
- // The operands are now on the FPU stack, but we don't need them.
- __ fstp(0);
- __ fstp(0);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label not_int32;
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR hits a hard case, use the runtime system to
- // get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- return; // Handled above.
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- Factory* factory = masm->isolate()->factory();
-
- // Convert odd ball arguments to numbers.
- Label check, done;
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(edx, edx);
- } else {
- __ mov(edx, Immediate(factory->nan_value()));
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(eax, eax);
- } else {
- __ mov(eax, Immediate(factory->nan_value()));
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime;
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_floats);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_floats);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- if (left_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm0, ecx, ecx, xmm2);
- }
- if (right_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm1, ecx, ecx, xmm2);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
-
- __ bind(&not_floats);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label non_smi_result;
- // We do not check the input arguments here, as any value is
- // unconditionally truncated to an int32 anyway. To get the
- // right optimized code, int32 type feedback is just right.
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR or MOD hit a hard case,
- // use the runtime system to get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3,
- BinaryOpIC::GENERIC,
- BinaryOpIC::GENERIC,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop the arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- GenerateAddStrings(masm);
- // Fall through.
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string, Label::kNear);
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, ebx);
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, edx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
-}
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
@@ -2034,7 +771,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -2049,7 +786,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
CpuFeatureScope scope(masm, SSE2);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
@@ -2062,17 +799,17 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm, type_);
__ fstp_d(Operand(esp, 0));
- __ movdbl(xmm1, Operand(esp, 0));
+ __ movsd(xmm1, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
@@ -2098,13 +835,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(eax);
__ CallRuntime(RuntimeFunction(), 1);
}
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
}
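The movdbl-to-movsd replacements in this stub also highlight a recurring pattern: ia32 has no direct move between the SSE and x87 register files, so a double travels through a stack slot, i.e. sub(esp, kDoubleSize), movsd to (esp, 0), fld_d from (esp, 0), add(esp, kDoubleSize) in one direction, and fstp_d followed by movsd on the way back. The round trip in C++ terms (memory is the meeting point; illustrative only):

    #include <cstdio>

    static double ThroughSpillSlot(double xmm1_value) {
      double slot = xmm1_value;  // movsd Operand(esp, 0), xmm1
      double st0 = slot;         // fld_d Operand(esp, 0)
      return st0;                // the x87 side now has the value
    }

    int main() {
      printf("%f\n", ThroughSpillSlot(2.5));  // 2.500000
      return 0;
    }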
@@ -2221,79 +958,6 @@ void TranscendentalCacheStub::GenerateOperation(
}
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-// Warning: can clobber inputs even when it jumps to |conversion_failure|!
-void FloatingPointHelper::LoadUnknownsAsIntegers(
- MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, conversion_failure);
- } else {
- __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
- }
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- Factory* factory = masm->isolate()->factory();
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- __ TruncateHeapNumberToI(edx, edx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, conversion_failure);
- } else {
- __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
- }
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the eax heap number in ecx.
-
- __ TruncateHeapNumberToI(ecx, eax);
-
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -2320,7 +984,7 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
__ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1, or branch to not_numbers.
__ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
@@ -2329,109 +993,20 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
+ __ Cvtsi2sd(xmm0, edx);
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
+ __ Cvtsi2sd(xmm1, eax);
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done, Label::kNear);
__ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ bind(&done);
}
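The lower-case cvtsi2sd calls become the capital-C Cvtsi2sd macro-assembler helper. Hedged, since only the call sites appear in this diff: cvtsi2sd writes just the low lane of its destination and leaves the upper bits alone, creating a false dependency on the register's previous contents, and the helper is understood to zero the register first so the convert starts clean. Assumed shape of the wrapper (not copied from the source):

    // void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
    //   xorps(dst, dst);     // break the dependency on dst's old contents
    //   cvtsi2sd(dst, src);  // int32 -> double into the low lane
    // }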
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, scratch);
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, scratch);
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch) {
- __ cvttsd2si(int32_result, Operand(operand));
- __ cvtsi2sd(xmm_scratch, int32_result);
- __ pcmpeqd(xmm_scratch, operand);
- __ movmskps(scratch, xmm_scratch);
- // Two least significant bits should be both set.
- __ not_(scratch);
- __ test(scratch, Immediate(3));
- __ j(not_zero, non_int32);
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
-
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
@@ -2470,7 +1045,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Save 1 in double_result - we need this several times later on.
__ mov(scratch, Immediate(1));
- __ cvtsi2sd(double_result, scratch);
+ __ Cvtsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
@@ -2485,12 +1060,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
__ bind(&base_is_smi);
__ SmiUntag(base);
- __ cvtsi2sd(double_base, base);
+ __ Cvtsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2501,7 +1076,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2509,7 +1084,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
}
@@ -2604,9 +1179,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), double_exponent);
+ __ movsd(Operand(esp, 0), double_exponent);
__ fld_d(Operand(esp, 0)); // E
- __ movdbl(Operand(esp, 0), double_base);
+ __ movsd(Operand(esp, 0), double_base);
__ fld_d(Operand(esp, 0)); // B, E
// Exponent is in st(1) and base is in st(0)
@@ -2629,7 +1204,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
  __ test_b(eax, 0x5F);  // Check all exceptions except the precision flag.
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ jmp(&done);
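The fast path above shuttles both SSE doubles through memory onto the x87 stack (there is no direct XMM-to-FPU move) and trusts the result only if the status word shows no exception besides precision. The middle of the stub is elided from this hunk; a hedged C++ sketch of the x87 identity such sequences typically evaluate (fyl2x/f2xm1/fscale) — an assumption, not a transcription:

#include <cmath>
#include <cstdio>

// pow(b, e) = 2^(e * log2(b)), with the exponent split into an integer
// part (handled by fscale) and a fractional part (handled by f2xm1,
// which only accepts |x| <= 1).
static double PowViaExp2(double base, double exponent) {
  double y = exponent * std::log2(base);  // fyl2x
  double i = std::trunc(y);               // frndint (rounding mode aside)
  double f = y - i;                       // fractional part, |f| < 1
  return std::exp2(f) * std::exp2(i);     // (f2xm1 + 1), then fscale
}

int main() {
  std::printf("%f %f\n", PowViaExp2(2.0, 10.0), std::pow(2.0, 10.0));
}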
@@ -2683,7 +1258,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// and may not have contained the exponent value in the first place when the
 // exponent is a smi. We reset it to the exponent value before bailing out.
__ j(not_equal, &done);
- __ cvtsi2sd(double_exponent, exponent);
+ __ Cvtsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
@@ -2696,7 +1271,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// as heap number in exponent.
__ bind(&done);
__ AllocateHeapNumber(eax, scratch, base, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
@@ -2704,8 +1279,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(4, scratch);
- __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
- __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+ __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
+ __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()), 4);
}
@@ -2713,7 +1288,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Store it into the (fixed) result register.
__ sub(esp, Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ bind(&done);
@@ -2756,8 +1331,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
}
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -3495,7 +2069,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ call(edx);
// Drop arguments and come back to JS mode.
- __ LeaveApiExitFrame();
+ __ LeaveApiExitFrame(true);
// Check the result.
Label success;
@@ -3768,106 +2342,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found) {
-  // Register usage: the result register also serves as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(object, &not_smi, Label::kNear);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated, Label::kNear);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache, Label::kNear);
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
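The cache probe deleted above hashes smis by their untagged value and doubles by xor-ing their two 32-bit halves, masks by half the backing-store length minus one, and compares with ucomisd so NaN never matches. A C++ sketch of the lookup — Entry and Lookup are illustrative names, not V8 code:

#include <cstdint>
#include <cstring>

// The cache is a flat power-of-two array of (number, string) pairs.
// A miss (or any NaN involvement) falls back to the runtime.
struct Entry { double key; const char* value; };

static const char* Lookup(const Entry* cache, int pairs, double number) {
  uint64_t bits;
  std::memcpy(&bits, &number, sizeof bits);
  uint32_t hash = static_cast<uint32_t>(bits) ^
                  static_cast<uint32_t>(bits >> 32);  // xor of the halves
  const Entry& e = cache[hash & (pairs - 1)];         // mask = pairs - 1
  return e.key == number ? e.value : nullptr;         // ucomisd; NaN misses
}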
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -4205,6 +2679,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // eax : number of arguments to the construct function
// ebx : cache cell for call target
// edi : the function to call
Isolate* isolate = masm->isolate();
@@ -4224,9 +2699,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
 // If we didn't have a matching function, and we didn't find the megamorphic
 // sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in ecx.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
__ j(not_equal, &miss);
@@ -4265,6 +2739,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(eax);
__ push(eax);
__ push(edi);
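GenerateRecordCallTarget walks a small state machine: the cell starts uninitialized, caches the first callee, and decays to megamorphic when a second, different callee shows up (Array calls cache an AllocationSite instead of the function). A C++ sketch of that state machine — illustrative, not V8 code:

enum class CacheState { kUninitialized, kMonomorphic, kMegamorphic };

struct CallTargetCell {
  CacheState state = CacheState::kUninitialized;
  const void* target = nullptr;  // JSFunction* or AllocationSite*

  void Record(const void* callee) {
    switch (state) {
      case CacheState::kUninitialized:
        state = CacheState::kMonomorphic;  // remember the first callee
        target = callee;
        break;
      case CacheState::kMonomorphic:
        if (target != callee) {            // the "miss" label in the stub
          state = CacheState::kMegamorphic;
          target = nullptr;
        }
        break;
      case CacheState::kMegamorphic:
        break;                             // terminal state
    }
  }
};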
@@ -4444,6 +2919,12 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ if (Serializer::enabled()) {
+ PlatformFeatureScope sse2(SSE2);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
+ } else {
+ BinaryOpStub::GenerateAheadOfTime(isolate);
+ }
}
@@ -4508,6 +2989,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// stack alignment is known to be correct. This function takes one argument
// which is passed on the stack, and we know that the stack has been
// prepared to pass at least one argument.
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
__ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
}
@@ -5455,33 +3938,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Drop(2);
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, ecx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ CallRuntime(Runtime::kStringAdd, 2);
- }
- __ ret(0);
- } else {
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, ecx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
- }
- __ ret(0);
- } else {
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -5517,12 +3978,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
__ mov(arg, scratch1);
__ mov(Operand(esp, stack_offset), arg);
__ bind(&done);
@@ -6253,24 +4709,24 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ mov(ecx, eax); // Can't clobber eax because we can still jump away.
__ SmiUntag(ecx);
- __ cvtsi2sd(xmm1, ecx);
+ __ Cvtsi2sd(xmm1, ecx);
__ bind(&left);
__ JumpIfSmi(edx, &left_smi, Label::kNear);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&left_smi);
__ mov(ecx, edx); // Can't clobber edx because we can still jump away.
__ SmiUntag(ecx);
- __ cvtsi2sd(xmm0, ecx);
+ __ Cvtsi2sd(xmm0, ecx);
__ bind(&done);
// Compare operands.
@@ -7300,9 +5756,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ inc(edx);
__ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
if (FLAG_debug_code) {
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
__ Assert(equal, kExpectedAllocationSiteInCell);
}
@@ -7447,8 +5902,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
- __ cmp(FieldOperand(edx, 0), Immediate(Handle<Map>(
- masm->isolate()->heap()->allocation_site_map())));
+ __ cmp(FieldOperand(edx, 0), Immediate(
+ masm->isolate()->factory()->allocation_site_map()));
__ j(not_equal, &no_info);
__ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 5c8eca37b5..006651c9c8 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -217,30 +217,6 @@ class StringCompareStub: public PlatformCodeStub {
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
-  // Generate code to do a lookup in the number string cache. If the number in
-  // the register object is found in the cache, the generated code falls
-  // through with the result in the result register. The object and result
-  // registers can be the same. If the number is not found in the cache, the
-  // code jumps to the label not_found, leaving only the contents of the
-  // object register unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
@@ -468,7 +444,7 @@ class RecordWriteStub: public PlatformCodeStub {
// Save all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+ masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
}
}
}
@@ -480,7 +456,7 @@ class RecordWriteStub: public PlatformCodeStub {
// Restore all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+ masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
}
masm->add(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 84a4d238bd..d09a85f8b1 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -117,7 +117,7 @@ UnaryMathFunction CreateExpFunction() {
CpuFeatureScope use_sse2(&masm, SSE2);
XMMRegister input = xmm1;
XMMRegister result = xmm2;
- __ movdbl(input, Operand(esp, 1 * kPointerSize));
+ __ movsd(input, Operand(esp, 1 * kPointerSize));
__ push(eax);
__ push(ebx);
@@ -125,7 +125,7 @@ UnaryMathFunction CreateExpFunction() {
__ pop(ebx);
__ pop(eax);
- __ movdbl(Operand(esp, 1 * kPointerSize), result);
+ __ movsd(Operand(esp, 1 * kPointerSize), result);
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
}
@@ -155,9 +155,9 @@ UnaryMathFunction CreateSqrtFunction() {
// Move double input into registers.
{
CpuFeatureScope use_sse2(&masm, SSE2);
- __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
+ __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
__ sqrtsd(xmm0, xmm0);
- __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
+ __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
// Load result into floating point register as return value.
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
@@ -462,10 +462,10 @@ OS::MemMoveFunction CreateMemMoveFunction() {
Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
__ bind(&f9_16);
- __ movdbl(xmm0, Operand(src, 0));
- __ movdbl(xmm1, Operand(src, count, times_1, -8));
- __ movdbl(Operand(dst, 0), xmm0);
- __ movdbl(Operand(dst, count, times_1, -8), xmm1);
+ __ movsd(xmm0, Operand(src, 0));
+ __ movsd(xmm1, Operand(src, count, times_1, -8));
+ __ movsd(Operand(dst, 0), xmm0);
+ __ movsd(Operand(dst, count, times_1, -8), xmm1);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f17_32);
@@ -666,8 +666,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, allocation_memento_found);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
}
// Set transitioned map.
@@ -694,8 +693,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -743,7 +741,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
XMMRegister the_hole_nan = xmm1;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(the_hole_nan,
+ __ movsd(the_hole_nan,
Operand::StaticVariable(canonical_the_hole_nan_reference));
}
__ jmp(&entry);
@@ -768,8 +766,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ SmiUntag(ebx);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ __ Cvtsi2sd(xmm0, ebx);
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
xmm0);
} else {
__ push(ebx);
@@ -789,7 +787,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
the_hole_nan);
} else {
__ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
@@ -833,8 +831,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label loop, entry, convert_hole, gc_required, only_change_map, success;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -899,9 +896,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// edx: new heap number
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0,
+ __ movsd(xmm0,
FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
__ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
@@ -1081,20 +1078,20 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label done;
- __ movdbl(double_scratch, ExpConstant(0));
+ __ movsd(double_scratch, ExpConstant(0));
__ xorpd(result, result);
__ ucomisd(double_scratch, input);
__ j(above_equal, &done);
__ ucomisd(input, ExpConstant(1));
- __ movdbl(result, ExpConstant(2));
+ __ movsd(result, ExpConstant(2));
__ j(above_equal, &done);
- __ movdbl(double_scratch, ExpConstant(3));
- __ movdbl(result, ExpConstant(4));
+ __ movsd(double_scratch, ExpConstant(3));
+ __ movsd(result, ExpConstant(4));
__ mulsd(double_scratch, input);
__ addsd(double_scratch, result);
__ movd(temp2, double_scratch);
__ subsd(double_scratch, result);
- __ movdbl(result, ExpConstant(6));
+ __ movsd(result, ExpConstant(6));
__ mulsd(double_scratch, ExpConstant(5));
__ subsd(double_scratch, input);
__ subsd(result, double_scratch);
@@ -1111,7 +1108,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ shl(temp1, 20);
__ movd(input, temp1);
__ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
- __ movdbl(double_scratch, Operand::StaticArray(
+ __ movsd(double_scratch, Operand::StaticArray(
temp2, times_8, ExternalReference::math_exp_log_table()));
__ por(input, double_scratch);
__ mulsd(result, input);
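EmitMathExp materializes 2^k by writing the biased exponent straight into a double's exponent field — the shl by 20 positions it within the high 32-bit word, which pshufd then moves into place — and multiplies it by a table-driven 2^fraction. A C++ sketch of the exponent-field trick; illustrative, not V8 code:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Writing k + 1023 into bits 62..52 of a double yields exactly 2^k,
// valid for -1022 <= k <= 1023 (no denormals or infinities here).
static double TwoToK(int k) {
  uint64_t bits = static_cast<uint64_t>(k + 1023) << 52;  // biased exponent
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;
}

int main() {
  std::printf("%g %g\n", TwoToK(10), std::exp2(10.0));  // 1024 1024
}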
@@ -1120,7 +1117,6 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-static const int kNoCodeAgeSequenceLength = 5;
static byte* GetNoCodeAgeSequence(uint32_t* length) {
static bool initialized = false;
@@ -1153,7 +1149,7 @@ bool Code::IsYoungSequence(byte* sequence) {
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
- *age = kNoAge;
+ *age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
sequence++; // Skip the kCallOpcode byte
@@ -1165,16 +1161,17 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
+ if (age == kNoAgeCodeAge) {
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
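Aging rewrites the first kNoCodeAgeSequenceLength bytes of a function as a call to an age-specific stub; making it young again copies the original prologue bytes back. A C++ sketch of the byte-level patching — the helpers are illustrative, not V8 code:

#include <cstdint>
#include <cstring>

constexpr int kSequenceLength = 5;  // kNoCodeAgeSequenceLength on ia32

static void MakeYoung(uint8_t* code, const uint8_t* young_sequence) {
  std::memcpy(code, young_sequence, kSequenceLength);
  // The real patcher also flushes the instruction cache here.
}

static void MakeOld(uint8_t* code, const uint8_t* stub_entry) {
  code[0] = 0xE8;  // ia32 call rel32, as emitted via CodePatcher
  int32_t rel = static_cast<int32_t>(
      stub_entry - (code + kSequenceLength));  // displacement is IP-relative
  std::memcpy(code + 1, &rel, sizeof rel);
}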
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 13a70afe52..e339b3ad11 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -177,87 +177,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x11;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-// The back edge bookkeeping code matches the pattern:
-//
-// sub <profiling_counter>, <delta>
-// jns ok
-// call <interrupt stub>
-// ok:
-//
-// The patched back edge looks like this:
-//
-// sub <profiling_counter>, <delta> ;; Not changed
-// nop
-// nop
-// call <on-stack replacement>
-// ok:
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- // Turn the jump into nops.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- // Replace the call address.
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- // Restore the original jump.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- // Restore the original call address.
- Assembler::set_target_address_at(call_target_address,
- interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT_EQ(osr_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT_EQ(interrupt_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -283,16 +202,14 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
+ int params = descriptor->environment_length();
output_frame->SetRegister(eax.code(), params);
output_frame->SetRegister(ebx.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ if (!CpuFeatures::IsSupported(SSE2)) return;
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
@@ -330,7 +247,7 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
+ __ movsd(Operand(esp, offset), xmm_reg);
}
}
@@ -382,8 +299,8 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
+ __ movsd(xmm0, Operand(esp, src_offset));
+ __ movsd(Operand(ebx, dst_offset), xmm0);
}
}
@@ -468,7 +385,7 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
- __ movdbl(xmm_reg, Operand(ebx, src_offset));
+ __ movsd(xmm_reg, Operand(ebx, src_offset));
}
}
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 01fa999645..13cf6bc49a 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -942,13 +942,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case SHORT_IMMEDIATE_INSTR: {
byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr));
+ AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
}
case BYTE_IMMEDIATE_INSTR: {
- AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
+ AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
data += 2;
break;
}
@@ -1042,6 +1042,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0x54) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("andps %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (f0byte == 0x57) {
data += 2;
int mod, regop, rm;
@@ -1239,8 +1247,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("extractps %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
+ NameOfCPURegister(rm),
+ NameOfXMMRegister(regop),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x22) {
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 6d39cc1e6e..704fb4e7d2 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -158,10 +158,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
+ __ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -1586,21 +1583,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -3316,7 +3307,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
__ cvtss2sd(xmm1, xmm1);
__ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
} else {
// 0x4130000000000000 is 1.0 x 2^20 as a double.
__ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
@@ -3555,8 +3546,8 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into eax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -4897,6 +4888,79 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x11;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ Address jns_offset_address = call_target_address - 2;
+
+ switch (target_state) {
+ case INTERRUPT:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // jns ok
+ // call <interrupt stub>
+ // ok:
+ *jns_instr_address = kJnsInstruction;
+ *jns_offset_address = kJnsOffset;
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // nop
+ // nop
+      // call <on-stack replacement>
+ // ok:
+ *jns_instr_address = kNopByteOne;
+ *jns_offset_address = kNopByteTwo;
+ break;
+ }
+
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+
+ if (*jns_instr_address == kJnsInstruction) {
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return INTERRUPT;
+ }
+
+ ASSERT_EQ(kNopByteOne, *jns_instr_address);
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+ if (Assembler::target_address_at(call_target_address) ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
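The added table above distinguishes the three states purely from the two bytes ahead of the call site and from the call target: 0x79 0x11 is the original "jns ok", 0x66 0x90 is the two-byte nop the patcher writes. A C++ sketch of that classification — illustrative, not V8 code:

#include <cstdint>

enum class BackEdgeState {
  kInterrupt, kOnStackReplacement, kOsrAfterStackCheck
};

static BackEdgeState Classify(const uint8_t* jns_instr_address,
                              uintptr_t call_target,
                              uintptr_t osr_entry) {
  if (*jns_instr_address == 0x79) {  // kJnsInstruction still in place
    return BackEdgeState::kInterrupt;
  }
  // Bytes are 0x66 0x90 (nops): patched. Only the call target tells the
  // two OSR flavors apart.
  return call_target == osr_entry ? BackEdgeState::kOnStackReplacement
                                  : BackEdgeState::kOsrAfterStackCheck;
}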
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 327ac57623..f8e4ea53d0 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -1304,7 +1304,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, eax);
@@ -1423,7 +1423,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, strict_mode,
+ Code::HANDLER, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, no_reg);
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index d50b780d71..46c87e1d62 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -120,24 +120,6 @@ void LCodeGen::Abort(BailoutReason reason) {
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
const int kPageSize = 4 * KB;
@@ -206,15 +188,8 @@ bool LCodeGen::GeneratePrologue() {
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
info()->AddNoFrameRange(0, masm_->pc_offset());
- __ push(esi); // Callee's context.
- if (info()->IsStub()) {
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- } else {
- __ push(edi); // Callee's JS function.
- }
}
if (info()->IsOptimizing() &&
@@ -275,7 +250,7 @@ bool LCodeGen::GeneratePrologue() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ movdbl(MemOperand(esp, count * kDoubleSize),
+ __ movsd(MemOperand(esp, count * kDoubleSize),
XMMRegister::FromAllocationIndex(save_iterator.Current()));
save_iterator.Advance();
count++;
@@ -340,12 +315,41 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
+ // Move state of dynamic frame alignment into edx.
+ __ mov(edx, Immediate(kNoAlignmentPadding));
+
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ Label do_not_pad, align_loop;
+ // Align ebp + 4 to a multiple of 2 * kPointerSize.
+ __ test(ebp, Immediate(kPointerSize));
+ __ j(zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+ // Move all parts of the frame over one word. The frame consists of:
+ // unoptimized frame slots, alignment state, context, frame pointer, return
+ // address, receiver, and the arguments.
+ __ mov(ecx, Immediate(scope()->num_parameters() +
+ 5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ sub(Operand(ebp), Immediate(kPointerSize));
+ __ bind(&do_not_pad);
+ }
+
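The align_loop above shifts the whole unoptimized frame down one word to make room for the padding slot (the "+ 5" covers alignment state, context, frame pointer, return address, and receiver), then stamps the vacated slot with kAlignmentZapValue. A C++ sketch of that shift — illustrative, not V8 code:

#include <cstddef>
#include <cstdint>

// Every live word between the new stack top and the incoming arguments
// moves down one slot; the vacated slot gets a recognizable zap value.
static void ShiftFrameDownOneWord(uintptr_t* stack_top, size_t words,
                                  uintptr_t zap_value) {
  for (size_t i = 0; i < words; i++) {
    stack_top[i] = stack_top[i + 1];  // mov eax,[ebx+4]; mov [ebx],eax
  }
  stack_top[words] = zap_value;       // kAlignmentZapValue marker
}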
// Save the first local, which is overwritten by the alignment state.
Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
__ push(alignment_loc);
- // Set the dynamic frame alignment state to "not aligned".
- __ mov(alignment_loc, Immediate(kNoAlignmentPadding));
+ // Set the dynamic frame alignment state.
+ __ mov(alignment_loc, edx);
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
@@ -355,44 +359,27 @@ void LCodeGen::GenerateOsrPrologue() {
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
-
- // Don't emit code for basic blocks with a replacement.
- if (instr->IsLabel()) {
- emit_instructions = !LLabel::cast(instr)->HasReplacement();
- }
- if (!emit_instructions) continue;
-
- if (FLAG_code_comments && instr->HasInterestingComment(this)) {
- Comment(";;; <@%d,#%d> %s",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->Mnemonic());
- }
-
- if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
-
- RecordAndUpdatePosition(instr->position());
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
+}
- instr->CompileToNative(this);
- if (!CpuFeatures::IsSupported(SSE2)) {
- if (instr->IsGoto()) {
- x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
- } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
- !instr->IsGap() && !instr->IsReturn()) {
- __ VerifyX87StackDepth(x87_stack_.depth());
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ if (instr->IsGoto()) {
+ x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+ } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
+ !instr->IsGap() && !instr->IsReturn()) {
+ if (instr->ClobbersDoubleRegisters()) {
+ if (instr->HasDoubleRegisterResult()) {
+ ASSERT_EQ(1, x87_stack_.depth());
+ } else {
+ ASSERT_EQ(0, x87_stack_.depth());
+ }
}
+ __ VerifyX87StackDepth(x87_stack_.depth());
}
}
- EnsureSpaceForLazyDeopt();
- return !is_aborted();
}
@@ -453,8 +440,9 @@ bool LCodeGen::GenerateDeferredCode() {
X87Stack copy(code->x87_stack());
x87_stack_ = copy;
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -532,6 +520,16 @@ void LCodeGen::X87LoadForUsage(X87Register reg) {
}
+void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
+ ASSERT(x87_stack_.Contains(reg1));
+ ASSERT(x87_stack_.Contains(reg2));
+ x87_stack_.Fxch(reg1, 1);
+ x87_stack_.Fxch(reg2);
+ x87_stack_.pop();
+ x87_stack_.pop();
+}
+
+
void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
ASSERT(is_mutable_);
ASSERT(Contains(reg) && stack_depth_ > other_slot);
@@ -931,8 +929,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
@@ -954,13 +950,12 @@ void LCodeGen::CallCode(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ CallRuntime(fun, argc);
+ __ CallRuntime(fun, argc, save_doubles);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
@@ -1122,26 +1117,31 @@ void LCodeGen::DeoptimizeIf(Condition cc,
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
@@ -1246,7 +1246,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, mode);
}
@@ -1258,17 +1258,10 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
+ masm()->positions_recorder()->WriteRecordedPositions();
}
@@ -1336,11 +1329,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1733,9 +1721,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
case 9:
__ lea(left, Operand(left, left, times_8, 0));
break;
- case 16:
- __ shl(left, 4);
- break;
+ case 16:
+ __ shl(left, 4);
+ break;
default:
__ imul(left, left, constant);
break;
@@ -1967,9 +1955,10 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
__ movd(res, Operand(temp));
__ psllq(res, 32);
if (lower != 0) {
+ XMMRegister xmm_scratch = double_scratch0();
__ Set(temp, Immediate(lower));
- __ movd(xmm0, Operand(temp));
- __ por(res, xmm0);
+ __ movd(xmm_scratch, Operand(temp));
+ __ por(res, xmm_scratch);
}
}
}
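DoConstantD builds the double from its two 32-bit halves, skipping the OR entirely when the low word is zero (as it is for constants like 1.0). A C++ sketch of the bit assembly — illustrative, not V8 code:

#include <cstdint>
#include <cstring>

// movd loads the upper half, psllq shifts it into place, and por merges
// the lower half only when it is nonzero.
static double MaterializeDouble(uint32_t upper, uint32_t lower) {
  uint64_t bits = (static_cast<uint64_t>(upper) << 32) | lower;
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;
}
// MaterializeDouble(0x3FF00000, 0) == 1.0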
@@ -2178,7 +2167,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
@@ -2208,8 +2197,6 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
- // Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left.is(result));
switch (instr->op()) {
case Token::ADD:
__ addsd(left, right);
@@ -2229,17 +2216,17 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
case Token::MOD: {
// Pass two doubles as arguments on the stack.
__ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), left);
- __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+ __ movsd(Operand(esp, 0 * kDoubleSize), left);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
4);
// Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
+ // Store it into the result register.
__ sub(Operand(esp), Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(result, Operand(esp, 0));
+ __ movsd(result, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
break;
}
@@ -2272,6 +2259,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ PrepareCallCFunction(4, eax);
X87Mov(Operand(esp, 1 * kDoubleSize), right);
X87Mov(Operand(esp, 0), left);
+ X87Free(right);
+ ASSERT(left.is(result));
X87PrepareToWrite(result);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
@@ -2301,14 +2290,6 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
}
-int LCodeGen::GetNextEmittedBlock() const {
- for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
- if (!chunk_->GetLabel(i)->HasReplacement()) return i;
- }
- return -1;
-}
-
-
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
int left_block = instr->TrueDestination(chunk_);
@@ -2340,25 +2321,6 @@ void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, no_condition);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, no_condition);
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- EmitBranch(instr, equal);
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32()) {
@@ -2369,8 +2331,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
@@ -2390,8 +2353,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
@@ -2476,8 +2440,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(not_equal, &not_heap_number, Label::kNear);
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
} else {
__ fldz();
__ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
@@ -2521,6 +2486,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = equal;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
case Token::LT:
cond = is_unsigned ? below : less;
break;
@@ -2556,10 +2525,15 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- CpuFeatureScope scope(masm(), SSE2);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ } else {
+ X87LoadForUsage(ToX87Register(right), ToX87Register(left));
+ __ FCmp();
+ }
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
@@ -2626,7 +2600,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ movdbl(MemOperand(esp, 0), input_reg);
+ __ movsd(MemOperand(esp, 0), input_reg);
} else {
__ fstp_d(MemOperand(esp, 0));
}
@@ -3016,14 +2990,6 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset));
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
@@ -3096,7 +3062,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
MemOperand(esp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -3131,7 +3097,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(result, Operand::ForCell(instr->hydrogen()->cell()));
+ __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
@@ -3154,7 +3120,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<PropertyCell> cell_handle = instr->hydrogen()->cell();
+ Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -3245,12 +3211,15 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (access.IsExternalMemory()) {
Register result = ToRegister(instr->result());
- if (instr->object()->IsConstantOperand()) {
- ExternalReference external_reference = ToExternalReference(
- LConstantOperand::cast(instr->object()));
- __ mov(result, MemOperand::StaticVariable(external_reference));
+ MemOperand operand = instr->object()->IsConstantOperand()
+ ? MemOperand::StaticVariable(ToExternalReference(
+ LConstantOperand::cast(instr->object())))
+ : MemOperand(ToRegister(instr->object()), offset);
+ if (access.representation().IsByte()) {
+ ASSERT(instr->hydrogen()->representation().IsInteger32());
+ __ movzx_b(result, operand);
} else {
- __ mov(result, MemOperand(ToRegister(instr->object()), offset));
+ __ mov(result, operand);
}
return;
}
@@ -3261,7 +3230,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, FieldOperand(object, offset));
+ __ movsd(result, FieldOperand(object, offset));
} else {
X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
}
@@ -3269,11 +3238,15 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (access.IsInobject()) {
- __ mov(result, FieldOperand(object, offset));
- } else {
+ if (!access.IsInobject()) {
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, offset));
+ object = result;
+ }
+ if (access.representation().IsByte()) {
+ ASSERT(instr->hydrogen()->representation().IsInteger32());
+ __ movzx_b(result, FieldOperand(object, offset));
+ } else {
+ __ mov(result, FieldOperand(object, offset));
}
}
@@ -3349,6 +3322,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -3405,7 +3384,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ movdbl(ToDoubleRegister(instr->result()), operand);
+ __ movsd(ToDoubleRegister(instr->result()), operand);
} else {
X87Mov(ToX87Register(instr->result()), operand);
}
@@ -3476,7 +3455,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, double_load_operand);
+ __ movsd(result, double_load_operand);
} else {
X87Mov(ToX87Register(instr->result()), double_load_operand);
}
@@ -3693,7 +3672,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
@@ -3778,9 +3756,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
if (can_invoke_directly) {
if (edi_state == EDI_UNINITIALIZED) {
__ LoadHeapObject(edi, function);
@@ -3805,6 +3780,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
// We need to adapt arguments.
+ LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
@@ -3903,11 +3879,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
CpuFeatureScope scope(masm(), SSE2);
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
- __ pand(input_reg, scratch);
+ __ andps(input_reg, scratch);
} else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
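The double case above computes fabs without a branch: subtracting from +0.0 flips only the sign bit (for non-NaN inputs), and AND-ing x with -x clears it. A C++ sketch — illustrative, not V8 code:

#include <cstdint>
#include <cstring>

// x and -x agree on every bit except the sign, so their AND is |x|.
static double AbsViaAnd(double x) {
  double neg = 0.0 - x;               // subsd from a zeroed register
  uint64_t a, b;
  std::memcpy(&a, &x, sizeof a);
  std::memcpy(&b, &neg, sizeof b);
  uint64_t r = a & b;                 // andps analogue
  double result;
  std::memcpy(&result, &r, sizeof result);
  return result;
}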
@@ -3924,7 +3900,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathFloor(LMathFloor* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3977,7 +3953,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, Operand(input_reg));
- __ cvtsi2sd(xmm_scratch, output_reg);
+ __ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
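
The &negative_sign path implements floor by truncating and compensating: cvttsd2si rounds toward zero, which for a negative non-integer lands one above the floor. A sketch of that path only (valid for x < 0):

    #include <cassert>
    #include <cstdint>

    // Mirrors the truncate-compare-compensate sequence above; x must be < 0.
    int32_t FloorOfNegative(double x) {
      assert(x < 0.0);
      int32_t t = static_cast<int32_t>(x);        // cvttsd2si: toward zero
      if (static_cast<double>(t) == x) return t;  // ucomisd equal: exact
      return t - 1;                               // compensate the truncation
    }
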
@@ -3992,14 +3968,14 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_temp = ToDoubleRegister(instr->temp());
ExternalReference one_half = ExternalReference::address_of_one_half();
ExternalReference minus_one_half =
ExternalReference::address_of_minus_one_half();
Label done, round_to_zero, below_one_half, do_not_compensate;
- __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+ __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
__ ucomisd(xmm_scratch, input_reg);
__ j(above, &below_one_half);
@@ -4013,7 +3989,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ jmp(&done);
__ bind(&below_one_half);
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
+ __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
__ ucomisd(xmm_scratch, input_reg);
__ j(below_equal, &round_to_zero);
@@ -4027,7 +4003,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ cvtsi2sd(xmm_scratch, output_reg);
+ __ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
__ j(equal, &done);
__ sub(output_reg, Immediate(1));
@@ -4059,7 +4035,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -4178,8 +4154,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
XMMRegister result = ToDoubleRegister(instr->result());
- // We use xmm0 as fixed scratch register here.
- XMMRegister scratch4 = xmm0;
+ XMMRegister scratch4 = double_scratch0();
__ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(scratch4, scratch3);
__ movd(result, random);
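
The constant 0x49800000 is 1.0 x 2^20 encoded as a single-precision float; the stub splices the random bits into a float's mantissa and subtracts the bias term. The same family of bit trick in portable C++, here producing a double in [0, 1) — a sketch, not the exact stub algorithm:

    #include <cstdint>
    #include <cstring>

    // Splice 52 random bits under the exponent of 1.0 to get a value in
    // [1.0, 2.0), then subtract 1.0 to land in [0.0, 1.0).
    double RandomBitsToUnitInterval(uint64_t random_bits) {
      uint64_t bits = (UINT64_C(0x3FF) << 52) | (random_bits >> 12);
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d - 1.0;
    }
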
@@ -4193,29 +4168,29 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
Label positive, done, zero;
- __ xorps(xmm0, xmm0);
- __ ucomisd(input_reg, xmm0);
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(equal, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(input_reg, Operand::StaticVariable(nan));
+ __ movsd(input_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&zero);
- __ push(Immediate(0xFFF00000));
- __ push(Immediate(0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ ExternalReference ninf =
+ ExternalReference::address_of_negative_infinity();
+ __ movsd(input_reg, Operand::StaticVariable(ninf));
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
__ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
+ __ movsd(Operand(esp, 0), input_reg);
__ fld_d(Operand(esp, 0));
__ fyl2x();
__ fstp_d(Operand(esp, 0));
- __ movdbl(input_reg, Operand(esp, 0));
+ __ movsd(input_reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
}
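
The x87 sequence works because fyl2x replaces st1, st0 with st1 * log2(st0); with fldln2 preloading ln 2, the result is ln 2 * log2(x) = ln x. Zero and negative inputs are special-cased above (negative infinity and canonical NaN). The identity in plain C++:

    #include <cmath>

    // ln(x) computed the way fldln2 + fyl2x does it: ln(2) * log2(x).
    double LnViaLog2(double x) {
      return std::log(2.0) * std::log2(x);
    }
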
@@ -4225,10 +4200,11 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
@@ -4273,7 +4249,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
@@ -4409,7 +4384,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
+ CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
@@ -4441,11 +4416,16 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
ToExternalReference(LConstantOperand::cast(instr->object())))
: MemOperand(ToRegister(instr->object()), offset);
if (instr->value()->IsConstantOperand()) {
+ ASSERT(!representation.IsByte());
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
__ mov(operand, Immediate(ToInteger32(operand_value)));
} else {
Register value = ToRegister(instr->value());
- __ mov(operand, value);
+ if (representation.IsByte()) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov(operand, value);
+ }
}
return;
}
@@ -4480,7 +4460,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(object, offset), value);
+ __ movsd(FieldOperand(object, offset), value);
} else {
X87Register value = ToX87Register(instr->value());
X87Mov(FieldOperand(object, offset), value);
@@ -4518,17 +4498,28 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
+ MemOperand operand = FieldOperand(write_register, offset);
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (operand_value->IsRegister()) {
- __ mov(FieldOperand(write_register, offset), ToRegister(operand_value));
+ Register value = ToRegister(operand_value);
+ if (representation.IsByte()) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov(operand, value);
+ }
} else {
Handle<Object> handle_value = ToHandle(operand_value);
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- __ mov(FieldOperand(write_register, offset), handle_value);
+ __ mov(operand, handle_value);
}
} else {
- __ mov(FieldOperand(write_register, offset), ToRegister(instr->value()));
+ Register value = ToRegister(instr->value());
+ if (representation.IsByte()) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov(operand, value);
+ }
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -4609,8 +4600,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+ __ movss(operand, xmm_scratch);
} else {
__ fld(0);
__ fstp_s(operand);
@@ -4618,7 +4610,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ movdbl(operand, ToDoubleRegister(instr->value()));
+ __ movsd(operand, ToDoubleRegister(instr->value()));
} else {
X87Mov(operand, ToX87Register(instr->value()));
}
@@ -4676,11 +4668,11 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ ucomisd(value, value);
__ j(parity_odd, &have_value); // NaN.
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+ __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
__ bind(&have_value);
}
- __ movdbl(double_store_operand, value);
+ __ movsd(double_store_operand, value);
} else {
// Can't use SSE2 in the serializer
if (instr->hydrogen()->IsConstantHoleStore()) {
@@ -4803,8 +4795,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationMemento(object, temp);
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(equal, instr->environment());
+ __ bind(&no_memento_found);
}
@@ -4825,9 +4819,8 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ j(not_equal, &not_applicable, branch_distance);
if (is_simple_map_transition) {
Register new_map_reg = ToRegister(instr->new_map_temp());
- Handle<Map> map = instr->hydrogen()->transitioned_map();
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
- Immediate(map));
+ Immediate(to_map));
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
@@ -4978,7 +4971,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
ASSERT(output->IsDoubleRegister());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
} else if (input->IsRegister()) {
Register input_reg = ToRegister(input);
__ push(input_reg);
@@ -5001,14 +4994,32 @@ void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatureScope scope(masm(), SSE2);
LOperand* input = instr->value();
LOperand* output = instr->result();
- LOperand* temp = instr->temp();
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ LOperand* temp = instr->temp();
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
+ __ LoadUint32(ToDoubleRegister(output),
+ ToRegister(input),
+ ToDoubleRegister(temp));
+ } else {
+ X87Register res = ToX87Register(output);
+ X87PrepareToWrite(res);
+ __ LoadUint32NoSSE2(ToRegister(input));
+ X87CommitWrite(res);
+ }
+}
+
+
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ Register input = ToRegister(instr->value());
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ test(input, Immediate(0xc0000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ SmiTag(input);
}
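
The test against 0xc0000000 checks that the top two bits are clear: on ia32 a smi is a signed 31-bit payload shifted left by one, so only values up to 2^30 - 1 can be tagged, and anything larger takes the deopt. A standalone model of the encoding:

    #include <cassert>
    #include <cstdint>

    // A uint32 fits in a non-negative ia32 smi iff bits 30 and 31 are clear.
    bool Uint32FitsInSmi(uint32_t value) {
      return (value & 0xC0000000u) == 0;  // the `test` + deopt above
    }

    int32_t SmiTag(uint32_t value) {
      assert(Uint32FitsInSmi(value));
      return static_cast<int32_t>(value << 1);  // tag bit 0 means "smi"
    }
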
@@ -5073,6 +5084,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
Label slow;
Register reg = ToRegister(value);
Register tmp = reg.is(eax) ? ecx : eax;
+ XMMRegister xmm_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
@@ -5087,7 +5099,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ xor_(reg, 0x80000000);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ cvtsi2sd(xmm0, Operand(reg));
+ __ Cvtsi2sd(xmm_scratch, Operand(reg));
} else {
__ push(reg);
__ fild_s(Operand(esp, 0));
@@ -5096,7 +5108,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
} else {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ LoadUint32(xmm0, reg,
+ __ LoadUint32(xmm_scratch, reg,
ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
} else {
// There's no fild variant for unsigned values, so zero-extend to a 64-bit
@@ -5132,12 +5144,12 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
if (!reg.is(eax)) __ mov(reg, eax);
- // Done. Put the value in xmm0 into the value of the allocated heap
+ // Done. Put the value in xmm_scratch into the value of the allocated heap
// number.
__ bind(&done);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
@@ -5181,7 +5193,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
@@ -5308,7 +5320,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
- Label load_smi, done;
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
@@ -5317,28 +5329,17 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
+ if (can_convert_undefined_to_nan) {
+ __ j(not_equal, &convert, Label::kNear);
} else {
- Label heap_number, convert;
- __ j(equal, &heap_number, Label::kNear);
-
- // Convert undefined (and hole) to NaN.
- __ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, env);
-
- __ bind(&convert);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
}
+
// Heap number to XMM conversion.
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
@@ -5347,6 +5348,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
+
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+
+ // Convert undefined (and hole) to NaN.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
+
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movsd(result_reg, Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+ }
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
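
After this restructuring the heap-number fast path falls straight through to the movsd load, while the undefined-to-NaN conversion moves out of line behind the &convert label. The decision logic, modeled in plain C++ (illustrative types, not V8's):

    #include <limits>

    // Fast path: heap number -> load payload. Out-of-line path: undefined ->
    // canonical NaN when permitted. Anything else deoptimizes (returns false).
    bool UntagDouble(bool is_heap_number, bool is_undefined,
                     bool can_convert_undefined_to_nan,
                     double payload, double* result) {
      if (is_heap_number) { *result = payload; return true; }
      if (can_convert_undefined_to_nan && is_undefined) {
        *result = std::numeric_limits<double>::quiet_NaN();
        return true;
      }
      return false;  // DeoptimizeIf(not_equal, env)
    }
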
@@ -5356,7 +5370,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// input register since we avoid dependencies.
__ mov(temp_reg, input_reg);
__ SmiUntag(temp_reg); // Untag smi before converting to float.
- __ cvtsi2sd(result_reg, Operand(temp_reg));
+ __ Cvtsi2sd(result_reg, Operand(temp_reg));
__ bind(&done);
}
@@ -5364,25 +5378,36 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
-
if (instr->truncating()) {
- Label heap_number, slow_case;
+ Label no_heap_number, check_bools, check_false;
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
+ __ j(not_equal, &no_heap_number, Label::kNear);
+ __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ jmp(done);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ __ bind(&no_heap_number);
+ // Check for Oddballs. Undefined/False is converted to zero and True to one
+ // for truncating conversions.
__ cmp(input_reg, factory()->undefined_value());
+ __ j(not_equal, &check_bools, Label::kNear);
+ __ Set(input_reg, Immediate(0));
+ __ jmp(done);
+
+ __ bind(&check_bools);
+ __ cmp(input_reg, factory()->true_value());
+ __ j(not_equal, &check_false, Label::kNear);
+ __ Set(input_reg, Immediate(1));
+ __ jmp(done);
+
+ __ bind(&check_false);
+ __ cmp(input_reg, factory()->false_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
- __ mov(input_reg, 0);
+ __ Set(input_reg, Immediate(0));
__ jmp(done);
-
- __ bind(&heap_number);
- __ TruncateHeapNumberToI(input_reg, input_reg);
} else {
Label bailout;
XMMRegister scratch = (instr->temp() != NULL)
@@ -5417,12 +5442,16 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
ASSERT(input_reg.is(ToRegister(instr->result())));
- DeferredTaggedToI* deferred =
- new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred =
+ new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
}
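
The widened truncating path now accepts oddballs as well as heap numbers: undefined and false truncate to 0, true to 1, everything else still deoptimizes. The rules as a table, using a hypothetical enum rather than a V8 type:

    #include <cstdint>

    enum class Tagged { kHeapNumber, kUndefined, kTrue, kFalse, kOther };

    // Truncation rules of DoDeferredTaggedToI above; returns false on the
    // deopt path. (The modulo-2^32 detail of TruncateHeapNumberToI for
    // out-of-range doubles is elided in this sketch.)
    bool TruncateToInt32(Tagged kind, double number_payload, int32_t* out) {
      switch (kind) {
        case Tagged::kHeapNumber: *out = static_cast<int32_t>(number_payload);
                                  return true;
        case Tagged::kUndefined:  *out = 0; return true;
        case Tagged::kTrue:       *out = 1; return true;
        case Tagged::kFalse:      *out = 0; return true;
        case Tagged::kOther:      return false;
      }
      return false;
    }
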
@@ -5487,7 +5516,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
@@ -5514,7 +5544,8 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
@@ -5594,7 +5625,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
if (instr->hydrogen()->object_in_new_space()) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
@@ -5649,22 +5680,21 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
-
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->has_migration_target()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
__ CompareMap(reg, map, &success);
__ j(equal, &success);
}
- Handle<Map> map = map_set->last();
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
__ CompareMap(reg, map, &success);
if (instr->hydrogen()->has_migration_target()) {
__ j(not_equal, deferred->entry());
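
The loop shape is unchanged by the move to UniqueSet<Map>: every map but the last branches to success on a hit, and only the final compare falls through to the deopt (or, with a migration target, to the deferred entry). A sketch of that shape:

    #include <vector>

    // All but the last candidate get an early-success branch; the final
    // compare decides between success and deoptimization/migration.
    bool MapSetMatches(const std::vector<int>& map_set, int object_map) {
      for (size_t i = 0; i + 1 < map_set.size(); ++i) {
        if (object_map == map_set[i]) return true;  // j(equal, &success)
      }
      return object_map == map_set.back();
    }
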
@@ -5679,8 +5709,9 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
@@ -5696,6 +5727,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
+ XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -5714,8 +5747,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
+ __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
@@ -6146,14 +6179,13 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
__ Nop(padding_size);
}
}
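
The generalized version pads with a caller-supplied size instead of the fixed Deoptimizer::patch_size(); the invariant is still that at least space_needed bytes separate consecutive lazy-deopt sites so a call can later be patched over them. The arithmetic as a minimal sketch (the emit_nops callback is hypothetical, not the V8 API):

    // Pad with nops when too little code was emitted since the last site.
    void EnsureSpaceForLazyDeopt(int current_pc, int last_lazy_deopt_pc,
                                 int space_needed, void (*emit_nops)(int)) {
      if (current_pc < last_lazy_deopt_pc + space_needed) {
        emit_nops(last_lazy_deopt_pc + space_needed - current_pc);
      }
    }
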
@@ -6162,7 +6194,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -6233,7 +6265,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -6246,7 +6278,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 769917f7e2..78bc69de91 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -33,6 +33,7 @@
#include "checks.h"
#include "deoptimizer.h"
#include "ia32/lithium-gap-resolver-ia32.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -45,45 +46,28 @@ class LDeferredCode;
class LGapNode;
class SafepointGenerator;
-class LCodeGen V8_FINAL BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
support_aligned_spilled_doubles_(false),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
frame_is_built_(false),
x87_stack_(assembler),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -129,12 +113,17 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
X87Register left, X87Register right, X87Register result);
void X87LoadForUsage(X87Register reg);
+ void X87LoadForUsage(X87Register reg1, X87Register reg2);
void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
void X87Fxch(X87Register reg, int other_slot = 0) {
x87_stack_.Fxch(reg, other_slot);
}
+ void X87Free(X87Register reg) {
+ x87_stack_.Free(reg);
+ }
+
bool X87StackEmpty() {
return x87_stack_.depth() == 0;
@@ -188,27 +177,13 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk()->graph(); }
- int GetNextEmittedBlock() const;
+ XMMRegister double_scratch0() const { return xmm0; }
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -220,14 +195,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
- void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
@@ -251,7 +226,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int argc,
@@ -331,9 +307,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -395,7 +370,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int* offset,
AllocationSiteMode mode);
- void EnsureSpaceForLazyDeopt();
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -425,26 +400,16 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void MakeSureStackPagesMapped(int offset);
#endif
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
bool dynamic_frame_alignment_;
bool support_aligned_spilled_doubles_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
bool frame_is_built_;
class X87Stack {
@@ -505,8 +470,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index b5bc18bdc9..2b2126af9d 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -326,7 +326,7 @@ void LGapResolver::EmitMove(int index) {
} else {
__ push(Immediate(upper));
__ push(Immediate(lower));
- __ movdbl(dst, Operand(esp, 0));
+ __ movsd(dst, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
} else {
@@ -360,7 +360,7 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
+ __ movsd(dst, src);
}
} else {
// load from the register onto the stack, store in destination, which must
@@ -378,12 +378,12 @@ void LGapResolver::EmitMove(int index) {
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
+ __ movsd(dst, src);
} else {
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
+ __ movsd(xmm0, src);
+ __ movsd(dst, xmm0);
}
} else {
// load from the stack slot on top of the floating point stack, and then
@@ -486,9 +486,9 @@ void LGapResolver::EmitSwap(int index) {
: destination);
Operand other =
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
- __ movdbl(xmm0, other);
- __ movdbl(other, reg);
- __ movdbl(reg, Operand(xmm0));
+ __ movsd(xmm0, other);
+ __ movsd(other, reg);
+ __ movsd(reg, Operand(xmm0));
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), SSE2);
// Double-width memory-to-memory. Spill on demand to use a general
@@ -499,12 +499,12 @@ void LGapResolver::EmitSwap(int index) {
Operand src1 = cgen_->HighOperand(source);
Operand dst0 = cgen_->ToOperand(destination);
Operand dst1 = cgen_->HighOperand(destination);
- __ movdbl(xmm0, dst0); // Save destination in xmm0.
+ __ movsd(xmm0, dst0); // Save destination in xmm0.
__ mov(tmp, src0); // Then use tmp to copy source to destination.
__ mov(dst0, tmp);
__ mov(tmp, src1);
__ mov(dst1, tmp);
- __ movdbl(src0, xmm0);
+ __ movsd(src0, xmm0);
} else {
// No other combinations are possible.
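
The register-to-memory double swap above is the usual three-move rotation through the fixed xmm0 scratch. Its shape in plain C++, with `scratch` standing in for xmm0:

    // movsd xmm0, other / movsd other, reg / movsd reg, xmm0
    void SwapDoubles(double& reg, double& other) {
      double scratch = other;
      other = reg;
      reg = scratch;
    }
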
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index ca1e60d644..fdddef3f47 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -386,9 +386,9 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot for a double-width slot.
- if (is_double) {
+ if (kind == DOUBLE_REGISTERS) {
spill_slot_count_++;
spill_slot_count_ |= 1;
num_double_slots_++;
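
A double spill slot occupies two word-sized slots and must start 8-byte aligned, which the increment-then-or-with-one sequence guarantees by forcing the running count to an odd value before the allocation. A sketch of the whole allocator, assuming the function ends by returning the post-incremented count as the surrounding context suggests:

    // Reserve two word slots for a double and hand back an odd index,
    // so the pair is 8-byte aligned within the frame.
    int GetNextSpillIndex(bool is_double, int& spill_slot_count) {
      if (is_double) {
        spill_slot_count++;     // skip a slot if needed...
        spill_slot_count |= 1;  // ...so the next index is odd
      }
      return spill_slot_count++;
    }
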
@@ -397,11 +397,12 @@ int LPlatformChunk::GetNextSpillIndex(bool is_double) {
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
@@ -479,7 +480,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// Reserve the first spill slot for the state of dynamic alignment.
if (info()->IsOptimizing()) {
- int alignment_state_index = chunk_->GetNextSpillIndex(false);
+ int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
ASSERT_EQ(alignment_state_index, 0);
USE(alignment_state_index);
}
@@ -488,7 +489,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(false);
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
@@ -560,29 +561,34 @@ LOperand* LChunkBuilder::UseAtStart(HValue* value) {
}
+static inline bool CanBeImmediateConstant(HValue* value) {
+ return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
+}
+
+
LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: Use(value);
}
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseAtStart(value);
}
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegister(value);
}
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegisterAtStart(value);
}
@@ -707,7 +713,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -762,52 +768,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseFixed(right_value, ecx);
}
- } else {
- right = UseFixed(right_value, ecx);
- }
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
}
- }
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
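
constant_value is masked with 0x1f because JavaScript shift counts are taken modulo 32, the same masking the ia32 shift instructions apply; that is also why a logical shift by a constant 0 is the only SHR case that can yield a result outside the int32 range. The semantics in C++ (a sketch; strict C++ signed-shift caveats aside):

    #include <cstdint>

    // JS <<, >> and >>> take the shift count modulo 32 (x86 does the same).
    int32_t JsShl(int32_t lhs, int32_t count)  { return lhs << (count & 0x1f); }
    int32_t JsSar(int32_t lhs, int32_t count)  { return lhs >> (count & 0x1f); }
    uint32_t JsShr(int32_t lhs, int32_t count) {
      // >>> by 0 yields a uint32 that may not fit in int32 — the deopt case
      // handled above.
      return static_cast<uint32_t>(lhs) >> (count & 0x1f);
    }
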
@@ -816,21 +814,22 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
@@ -914,10 +913,31 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ HValue* first_operand = current->OperandCount() == 0
+ ? graph()->GetConstant1()
+ : current->OperandAt(0);
+ instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(first_operand)));
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
#if DEBUG
// Make sure that the lithium instruction has either no fixed register
// constraints in temps or the result OR no uses that are only used at
@@ -947,7 +967,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@@ -964,7 +983,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
clobber->set_hydrogen_value(current);
chunk_->AddInstruction(clobber, current_block_);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -1061,21 +1079,15 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor);
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
ToBooleanStub::Types expected = instr->expected_input_types();
// Tagged values that are not known smis or booleans require a
// deoptimization environment. If the instruction is generic no
// environment is needed since all cases are handled.
+ HValue* value = instr->value();
Representation rep = value->representation();
HType type = value->type();
if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
@@ -1141,12 +1153,6 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegister(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -1171,7 +1177,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = UseAny(instr->argument());
return new(zone()) LPushArgument(argument);
}
@@ -1238,7 +1243,6 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
}
@@ -1246,7 +1250,6 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1356,7 +1359,6 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseFixed(instr->key(), ecx);
- argument_count_ -= instr->argument_count();
LCallKeyed* result = new(zone()) LCallKeyed(context, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1364,7 +1366,6 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallNamed* result = new(zone()) LCallNamed(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1372,14 +1373,12 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallGlobal* result = new(zone()) LCallGlobal(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
}
@@ -1387,7 +1386,6 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1396,7 +1394,6 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1405,14 +1402,12 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
LCallFunction* result = new(zone()) LCallFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
}
@@ -1442,29 +1437,19 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(instr->op(), context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
@@ -1481,8 +1466,9 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LOperand* divisor = UseRegister(instr->right());
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1584,17 +1570,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsSmiOrTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, xmm2),
- UseFixedDouble(right, xmm1));
- return MarkAsCall(DefineFixedDouble(mod, xmm1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
@@ -1618,7 +1597,6 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
@@ -1639,7 +1617,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1671,7 +1648,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1752,9 +1728,12 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
ASSERT(instr->right()->representation().IsDouble());
LOperand* left;
LOperand* right;
- if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
- left = UseRegisterOrConstantAtStart(instr->left());
- right = UseRegisterOrConstantAtStart(instr->right());
+ if (CanBeImmediateConstant(instr->left()) &&
+ CanBeImmediateConstant(instr->right())) {
+ // The code generator requires either both inputs to be constant
+ // operands, or neither.
+ left = UseConstant(instr->left());
+ right = UseConstant(instr->right());
} else {
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
@@ -1766,6 +1745,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1774,8 +1755,8 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1909,6 +1890,13 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), eax);
@@ -1944,7 +1932,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
// building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
LOperand* temp = TempRegister();
@@ -2015,8 +2002,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ LInstruction* result = val->CheckFlag(HInstruction::kUint32)
+ ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
+ : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
@@ -2050,12 +2038,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone())
- LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@@ -2234,6 +2216,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2435,7 +2422,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
!(FLAG_track_double_fields && instr->field_representation().IsDouble());
LOperand* val;
- if (needs_write_barrier) {
+ if (instr->field_representation().IsByte()) {
+ // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
+ // Just force the value to be in eax and we're safe here.
+ val = UseFixed(instr->value(), eax);
+ } else if (needs_write_barrier) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
@@ -2582,7 +2573,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallStub* result = new(zone()) LCallStub(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2711,7 +2701,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 3a609c991a..752fdd4f6a 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -107,7 +107,6 @@ class LCodeGen;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
@@ -116,7 +115,6 @@ class LCodeGen;
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
- V(IsNumberAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
@@ -130,6 +128,7 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
+ V(LoadRoot) \
V(MapEnumLength) \
V(MathAbs) \
V(MathCos) \
@@ -184,6 +183,7 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
V(WrapReceiver)
@@ -215,7 +215,6 @@ class LInstruction : public ZoneObject {
: environment_(NULL),
hydrogen_value_(NULL),
bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() {}
@@ -256,15 +255,6 @@ class LInstruction : public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- // The 31 bits PositionBits is used to store the int position value. And the
- // position value may be RelocInfo::kNoPosition (-1). The accessor always
- // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
- // and can fit into the 31 bits PositionBits.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -310,7 +300,6 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -922,19 +911,6 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
- public:
- explicit LIsNumberAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
-};
-
-
class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1144,19 +1120,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1309,7 +1272,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1605,6 +1568,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1634,11 +1606,6 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
return hydrogen()->is_external();
}
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
- return !CpuFeatures::IsSupported(SSE2) &&
- !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
- }
-
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -2061,8 +2028,13 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
@@ -2105,6 +2077,19 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -2189,7 +2174,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2364,8 +2349,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2515,12 +2502,13 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* value, LOperand* temp) {
+ LClampTToUint8(LOperand* value, LOperand* temp_xmm) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp_xmm;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp_xmm() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
@@ -2742,8 +2730,8 @@ class LPlatformChunk V8_FINAL : public LChunk {
: LChunk(info, graph),
num_double_slots_(0) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
int num_double_slots() const { return num_double_slots_; }
@@ -2765,13 +2753,14 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
next_block_(NULL),
argument_count_(0),
allocator_(allocator),
- position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2907,7 +2896,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
@@ -2921,7 +2910,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HBasicBlock* next_block_;
int argument_count_;
LAllocator* allocator_;
- int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index b65d328435..025bd891c2 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -33,6 +33,7 @@
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
#include "serialize.h"
@@ -232,7 +233,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
j(not_equal, &done, Label::kNear);
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), input_reg);
+ movsd(MemOperand(esp, 0), input_reg);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
bind(&done);
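The not_equal guard above (and the explicit cmp(result_reg, 0x80000000u) in the TruncateHeapNumberToI hunk below) hinges on cvttsd2si's overflow behavior: NaN and out-of-range doubles produce the "integer indefinite" value 0x80000000, so only that result falls through to SlowTruncateToI. A small self-contained C++ model, offered as a sketch:

#include <cmath>
#include <cstdint>

// Model of cvttsd2si: truncate toward zero; NaN and values whose truncation
// does not fit in int32 yield 0x80000000 (the x86 "integer indefinite").
int32_t ModelCvttsd2si(double value) {
  if (std::isnan(value)) return INT32_MIN;
  double truncated = std::trunc(value);  // round toward zero first
  if (truncated < -2147483648.0 || truncated > 2147483647.0) return INT32_MIN;
  return static_cast<int32_t>(truncated);
}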
@@ -253,8 +254,8 @@ void MacroAssembler::X87TOSToI(Register result_reg,
Label::Distance dst) {
Label done;
sub(esp, Immediate(kPointerSize));
- fist_s(MemOperand(esp, 0));
fld(0);
+ fist_s(MemOperand(esp, 0));
fild_s(MemOperand(esp, 0));
pop(result_reg);
FCmp();
@@ -283,7 +284,7 @@ void MacroAssembler::DoubleToI(Register result_reg,
Label::Distance dst) {
ASSERT(!input_reg.is(scratch));
cvttsd2si(result_reg, Operand(input_reg));
- cvtsi2sd(scratch, Operand(result_reg));
+ Cvtsi2sd(scratch, Operand(result_reg));
ucomisd(scratch, input_reg);
j(not_equal, conversion_failed, dst);
j(parity_even, conversion_failed, dst); // NaN.
@@ -344,7 +345,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
}
} else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
cmp(result_reg, 0x80000000u);
j(not_equal, &done, Label::kNear);
@@ -361,7 +362,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
if (input_reg.is(result_reg)) {
// Input is clobbered. Restore number from double scratch.
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), xmm0);
+ movsd(MemOperand(esp, 0), xmm0);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
} else {
@@ -390,9 +391,9 @@ void MacroAssembler::TaggedToI(Register result_reg,
ASSERT(!temp.is(no_xmm_reg));
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
- cvtsi2sd(temp, Operand(result_reg));
+ Cvtsi2sd(temp, Operand(result_reg));
ucomisd(xmm0, temp);
RecordComment("Deferred TaggedToI: lost precision");
j(not_equal, lost_precision, Label::kNear);
@@ -445,25 +446,36 @@ void MacroAssembler::TaggedToI(Register result_reg,
}
-
-static double kUint32Bias =
- static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-
void MacroAssembler::LoadUint32(XMMRegister dst,
Register src,
XMMRegister scratch) {
Label done;
cmp(src, Immediate(0));
- movdbl(scratch,
- Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
- cvtsi2sd(dst, src);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ movsd(scratch, Operand::StaticVariable(uint32_bias));
+ Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
addsd(dst, scratch);
bind(&done);
}
+void MacroAssembler::LoadUint32NoSSE2(Register src) {
+ Label done;
+ push(src);
+ fild_s(Operand(esp, 0));
+ cmp(src, Immediate(0));
+ j(not_sign, &done, Label::kNear);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ fld_d(Operand::StaticVariable(uint32_bias));
+ faddp(1);
+ bind(&done);
+ add(esp, Immediate(kPointerSize));
+}
+
+
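LoadUint32 and LoadUint32NoSSE2 both lean on the same identity: ia32 only offers a signed int32-to-double conversion, so a uint32 value with its sign bit set converts to value - 2^32 and must be re-biased by adding 2^32 (the address_of_uint32_bias external reference, which replaces the file-local kUint32Bias removed above). A self-contained C++ model, as a sketch:

#include <cstdint>

// Model of LoadUint32: convert as signed (cvtsi2sd / fild_s), then add 2^32
// back whenever the value was misread as negative.
double ModelLoadUint32(uint32_t value) {
  const double kUint32Bias = 4294967296.0;  // 2^32
  double result = static_cast<double>(static_cast<int32_t>(value));
  if (static_cast<int32_t>(value) < 0) result += kUint32Bias;
  return result;  // equals static_cast<double>(value)
}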
void MacroAssembler::RecordWriteArray(Register object,
Register value,
Register index,
@@ -676,6 +688,12 @@ void MacroAssembler::DebugBreak() {
#endif
+void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtsi2sd(dst, src);
+}
+
+
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
xor_(dst, dst); // Shorter than mov.
@@ -799,9 +817,9 @@ void MacroAssembler::StoreNumberToDoubleElements(
ExternalReference::address_of_canonical_non_hole_nan();
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4,
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
@@ -821,7 +839,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&is_nan);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+ movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
} else {
fld_d(Operand::StaticVariable(canonical_nan_reference));
}
@@ -834,8 +852,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
SmiUntag(scratch1);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope fscope(this, SSE2);
- cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4,
+ Cvtsi2sd(scratch2, scratch1);
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
@@ -996,6 +1014,30 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ } else {
PredictableCodeSizeScope predictable_code_size_scope(this,
kNoCodeAgeSequenceLength);
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
+ } else {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(edi); // Callee's JS function.
+ }
+ }
+}
+
+
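For orientation, this is the frame the non-stub path establishes (a sketch; the slot assignments follow V8's standard frame convention, and the pre-aged path is expected to reach the same state via the MarkCodeAsExecutedOnce builtin):

// ebp + 4 : return address
// ebp + 0 : saved caller ebp                <- ebp
// ebp - 4 : esi (callee's context)
// ebp - 8 : edi (callee's JS function)      <- esp
//           (the stub path stores Smi::FromInt(StackFrame::STUB) here instead)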
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
@@ -1051,7 +1093,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+ movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
@@ -1095,7 +1137,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+ movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
}
}
@@ -1109,14 +1151,16 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Push the return address to get ready to return.
push(ecx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
- mov(esi, Operand::StaticVariable(context_address));
+ if (restore_context) {
+ mov(esi, Operand::StaticVariable(context_address));
+ }
#ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
@@ -1128,11 +1172,11 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
mov(esp, ebp);
pop(ebp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
@@ -2141,23 +2185,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(eax, Immediate(function->nargs));
- mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
- : kDontSaveFPRegs);
- CallStub(&ces);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -2172,7 +2202,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(eax, Immediate(num_arguments));
mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
+ CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
+ : kDontSaveFPRegs);
CallStub(&ces);
}
@@ -2221,11 +2252,13 @@ void MacroAssembler::PrepareCallApiFunction(int argc) {
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
- Operand thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
ExternalReference limit_address =
@@ -2281,9 +2314,10 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Label prologue;
// Load the value from ReturnValue
- mov(eax, Operand(ebp, return_value_offset * kPointerSize));
+ mov(eax, return_value_operand);
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -2303,6 +2337,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate()->factory()->the_hole_value()));
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -2339,11 +2374,19 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ mov(esi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
@@ -3003,6 +3046,88 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst,
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ sub(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfNotSmi(object, &not_smi, Label::kNear);
+ mov(scratch, object);
+ SmiUntag(scratch);
+ jmp(&smi_hash_calculated, Label::kNear);
+ bind(&not_smi);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ Register index = scratch;
+ Register probe = mask;
+ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope fscope(this, SSE2);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ } else {
+ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ FCmp();
+ }
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache, Label::kNear);
+
+ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ // Check if the entry is the smi we are looking for.
+ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
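The index computation above can be restated in plain C++ (a sketch, assuming the scheme the comment describes: smis hash to their untagged value, heap numbers to the xor of the two 32-bit halves of their IEEE-754 bits, both masked by half the cache length minus one):

#include <cstdint>
#include <cstring>

// Model of the number-string-cache index: each entry occupies two slots
// (number, string), hence the (length / 2) - 1 mask.
uint32_t NumberStringCacheIndex(double number, bool is_smi, int cache_length) {
  uint32_t mask = static_cast<uint32_t>(cache_length / 2 - 1);
  if (is_smi) return static_cast<uint32_t>(static_cast<int32_t>(number)) & mask;
  uint64_t bits;
  std::memcpy(&bits, &number, sizeof bits);  // the double's raw IEEE-754 bits
  return (static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32)) & mask;
}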
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
@@ -3408,9 +3533,8 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
- Register scratch_reg) {
- Label no_memento_available;
-
+ Register scratch_reg,
+ Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
@@ -3419,12 +3543,11 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
lea(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Immediate(new_space_start));
- j(less, &no_memento_available);
+ j(less, no_memento_found);
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater, &no_memento_available);
+ j(greater, no_memento_found);
cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
- Immediate(Handle<Map>(isolate()->heap()->allocation_memento_map())));
- bind(&no_memento_available);
+ Immediate(isolate()->factory()->allocation_memento_map()));
}
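The rewritten check reduces to a range test on the address just past the array, followed by a map compare that now sets the condition code for the caller. A C++ model of the range test (a sketch; the size parameters stand in for JSArray::kSize and AllocationMemento::kSize):

#include <cstdint>

// Model of TestJSArrayForAllocationMemento: a memento, if present, sits
// immediately after the JSArray in new space, so the word just past it must
// lie within [new_space_start, new_space_allocation_top].
bool MementoAddressInNewSpace(uintptr_t array, uintptr_t js_array_size,
                              uintptr_t memento_size,
                              uintptr_t new_space_start,
                              uintptr_t new_space_top) {
  uintptr_t memento_end = array + js_array_size + memento_size;
  // The caller then compares the map word at memento_end - memento_size
  // against allocation_memento_map and branches on the equal condition.
  return memento_end >= new_space_start && memento_end <= new_space_top;
}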
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index e4e4533bf5..30f8a8dfbb 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -225,6 +225,9 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
@@ -240,7 +243,7 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
- void LeaveApiExitFrame();
+ void LeaveApiExitFrame(bool restore_context);
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
@@ -366,6 +369,12 @@ class MacroAssembler: public Assembler {
void Set(Register dst, const Immediate& x);
void Set(const Operand& dst, const Immediate& x);
+ // The cvtsi2sd instruction only writes to the low 64 bits of the dst
+ // register, which hinders register renaming and makes dependence chains
+ // longer. So we use xorps to clear the dst register before cvtsi2sd to
+ // solve this issue.
+ void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
+ void Cvtsi2sd(XMMRegister dst, const Operand& src);
+
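As the comment explains, the wrapper exists purely to break a false dependence: cvtsi2sd merges its result into the low half of dst, so without the xorps the conversion must wait for dst's previous producer. A typical call site after this change (mirroring the store-transition code later in this diff):

// Convert an untagged int32 to a double without a false xmm dependence:
// Cvtsi2sd clears xmm0 with xorps, then converts.
__ SmiUntag(value_reg);
__ Cvtsi2sd(xmm0, value_reg);
__ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);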
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
void SafeSet(Register dst, const Immediate& x);
@@ -509,6 +518,7 @@ class MacroAssembler: public Assembler {
}
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
+ void LoadUint32NoSSE2(Register src);
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value,
@@ -754,11 +764,18 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ }
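With the widened signature, each call site chooses FP-register saving per call, and CallRuntimeSaveDoubles becomes plain sugar. A sketch of both forms (the function ids are illustrative; per the .cc change above, kSaveFPRegs silently degrades to kDontSaveFPRegs when SSE2 is unavailable):

// XMM registers may be clobbered across this call.
__ CallRuntime(Runtime::kPromoteScheduledException, 0);
// Doubles are live across this call: have CEntryStub spill and reload them.
__ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);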
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
@@ -807,7 +824,8 @@ class MacroAssembler: public Assembler {
Address thunk_address,
Operand thunk_last_arg,
int stack_space,
- int return_value_offset_from_ebp);
+ Operand return_value_operand,
+ Operand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext);
@@ -890,6 +908,17 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities.
+ // Generate code to do a lookup in the number string cache. If the number in
+ // register |object| is found in the cache, the generated code falls through
+ // with the result in register |result|. |object| and |result| may be the
+ // same register. If the number is not found in the cache, the code jumps to
+ // the label |not_found|; only the content of register |object| is then
+ // guaranteed to be unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
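A hypothetical call site matching the contract above (register choices are illustrative):

Label runtime;
// On fall-through, ebx holds the cached string for the number in eax.
__ LookupNumberStringCache(eax, ebx, ecx, edx, &runtime);
__ mov(eax, ebx);
__ ret(0);
__ bind(&runtime);  // cache miss: fall back to the NumberToString runtime call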
// Check whether the instance type represents a flat ASCII string. Jump to the
// label if not. If the instance type register can be scratched, specify the
// same register for both instance type and scratch.
@@ -931,9 +960,20 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
- // If allocation info is present, conditional code is set to equal
+ // If allocation info is present, conditional code is set to equal.
void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg);
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ j(equal, memento_found);
+ bind(&no_memento_found);
+ }
private:
bool generating_stub_;
@@ -957,7 +997,7 @@ class MacroAssembler: public Assembler {
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
- void LeaveExitFrameEpilogue();
+ void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 354c2fdcb0..9786cffe86 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -329,32 +329,28 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length from the string and convert to a smi.
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, JS_VALUE_TYPE);
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
}
@@ -462,50 +458,50 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
// -- esp[0] : return address
- // -- esp[4] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- esp[8] : api function
- // (first fast api call extra argument)
- // -- esp[12] : api call data
- // -- esp[16] : isolate
- // -- esp[20] : ReturnValue default value
- // -- esp[24] : ReturnValue
- // -- esp[28] : last argument
+ // -- esp[4] - esp[28] : FunctionCallbackInfo, incl.
+ // : object passing the type check
+ // (set by CheckPrototypes)
+ // -- esp[32] : last argument
// -- ...
- // -- esp[(argc + 6) * 4] : first argument
- // -- esp[(argc + 7) * 4] : receiver
+ // -- esp[(argc + 7) * 4] : first argument
+ // -- esp[(argc + 8) * 4] : receiver
// -----------------------------------
+
+ typedef FunctionCallbackArguments FCA;
+ // Save calling context.
+ __ mov(Operand(esp, (1 + FCA::kContextSaveIndex) * kPointerSize), esi);
+
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(edi, function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // Pass the additional arguments.
- __ mov(Operand(esp, 2 * kPointerSize), edi);
+ // Construct the FunctionCallbackInfo.
+ __ mov(Operand(esp, (1 + FCA::kCalleeIndex) * kPointerSize), edi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ mov(ecx, api_call_info);
__ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
+ __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize), ebx);
} else {
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
+ __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize),
+ Immediate(call_data));
}
- __ mov(Operand(esp, 4 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kIsolateIndex) * kPointerSize),
Immediate(reinterpret_cast<int>(masm->isolate())));
- __ mov(Operand(esp, 5 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kReturnValueOffset) * kPointerSize),
masm->isolate()->factory()->undefined_value());
- __ mov(Operand(esp, 6 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kReturnValueDefaultValueIndex) * kPointerSize),
masm->isolate()->factory()->undefined_value());
// Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 6);
- __ lea(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
-
+ STATIC_ASSERT(kFastApiCallArguments == 7);
+ __ lea(eax, Operand(esp, 1 * kPointerSize));
// The API function gets a reference to the v8::Arguments. If the CPU profiler
// is enabled, the wrapper function will be called and we need to pass
@@ -521,14 +517,14 @@ static void GenerateFastApiCall(MacroAssembler* masm,
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
- // v8::Arguments::implicit_args_.
+ // FunctionCallbackInfo::implicit_args_.
__ mov(ApiParameterOperand(2), eax);
- __ add(eax, Immediate(argc * kPointerSize));
- // v8::Arguments::values_.
+ __ add(eax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
__ mov(ApiParameterOperand(3), eax);
- // v8::Arguments::length_.
+ // FunctionCallbackInfo::length_.
__ Set(ApiParameterOperand(4), Immediate(argc));
- // v8::Arguments::is_construct_call_.
+ // FunctionCallbackInfo::is_construct_call_.
__ Set(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
@@ -537,11 +533,17 @@ static void GenerateFastApiCall(MacroAssembler* masm,
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ Operand context_restore_operand(ebp,
+ (2 + FCA::kContextSaveIndex) * kPointerSize);
+ Operand return_value_operand(ebp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
__ CallApiFunctionAndReturn(function_address,
thunk_address,
ApiParameterOperand(1),
argc + kFastApiCallArguments + 1,
- kFastApiCallArguments + 1);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
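The rewritten stub builds the FunctionCallbackInfo implicit-argument block directly on the stack. The slot order below is an assumption based on this V8 version's arguments.h (the STATIC_ASSERT above only pins kFastApiCallArguments == 7); esp[0] holds the return address:

// esp[(1 + FCA::kHolderIndex) * 4]                  : type-check object (holder)
// esp[(1 + FCA::kIsolateIndex) * 4]                 : isolate
// esp[(1 + FCA::kReturnValueDefaultValueIndex) * 4] : undefined
// esp[(1 + FCA::kReturnValueOffset) * 4]            : undefined (return value)
// esp[(1 + FCA::kDataIndex) * 4]                    : api call data
// esp[(1 + FCA::kCalleeIndex) * 4]                  : callee (edi)
// esp[(1 + FCA::kContextSaveIndex) * 4]             : saved esi (calling context)
// esp[(1 + kFastApiCallArguments) * 4]              : last JS argument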
@@ -556,6 +558,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
ASSERT(!receiver.is(scratch));
const int stack_space = kFastApiCallArguments + argc + 1;
+ const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
// Copy return value.
__ mov(scratch, Operand(esp, 0));
// Assign stack space for the call arguments.
@@ -563,7 +566,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// Move the return address on top of the stack.
__ mov(Operand(esp, 0), scratch);
// Write holder to stack frame.
- __ mov(Operand(esp, 1 * kPointerSize), receiver);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), receiver);
// Write receiver to stack frame.
int index = stack_space;
__ mov(Operand(esp, index-- * kPointerSize), receiver);
@@ -574,7 +577,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ mov(Operand(esp, index-- * kPointerSize), values[i]);
}
- GenerateFastApiCall(masm, optimization, argc);
+ GenerateFastApiCall(masm, optimization, argc, true);
}
@@ -688,7 +691,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiCall(masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
@@ -776,9 +779,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
-void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ mov(this->name(), Immediate(name));
@@ -809,7 +812,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -827,19 +830,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
// store is successful.
-void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register unused,
- Label* miss_label,
- Label* slow) {
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
@@ -862,7 +865,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, value_reg);
+ __ Cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
@@ -876,7 +879,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
@@ -884,7 +887,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
}
@@ -998,15 +1001,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
-void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -1041,7 +1044,7 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, value_reg);
+ __ Cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
@@ -1054,14 +1057,14 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
}
@@ -1160,6 +1163,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
__ mov(scratch1, Handle<Map>(object->map()));
@@ -1176,7 +1180,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int depth = 0;
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
// Traverse the prototype chain and check the maps in the prototype chain for
@@ -1237,7 +1241,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
// Go to the next object in the prototype chain.
@@ -1269,9 +1273,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
if (!miss->is_unused()) {
__ jmp(success);
__ bind(miss);
@@ -1280,9 +1284,9 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
-void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
if (!miss->is_unused()) {
__ jmp(success);
GenerateRestoreName(masm(), miss, name);
@@ -1291,7 +1295,7 @@ void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
+Register LoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -1351,7 +1355,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
}
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
+void LoadStubCompiler::NonexistentHandlerFrontend(
Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
@@ -1371,10 +1375,10 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
}
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
@@ -1390,34 +1394,32 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 0, NULL);
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch3().is(reg));
__ pop(scratch3()); // Get return address to place it below.
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
__ push(receiver()); // receiver
- __ mov(scratch2(), esp);
- ASSERT(!scratch2().is(reg));
// Push data from ExecutableAccessorInfo.
if (isolate()->heap()->InNewSpace(callback->data())) {
- Register scratch = reg.is(scratch1()) ? receiver() : scratch1();
- __ mov(scratch, Immediate(callback));
- __ push(FieldOperand(scratch, ExecutableAccessorInfo::kDataOffset));
+ ASSERT(!scratch2().is(reg));
+ __ mov(scratch2(), Immediate(callback));
+ __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
} else {
__ push(Immediate(Handle<Object>(callback->data(), isolate())));
}
@@ -1427,9 +1429,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ push(Immediate(reinterpret_cast<int>(isolate())));
__ push(reg); // holder
- // Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const ExecutableAccessorInfo& to the C++ callback.
- __ push(scratch2());
+ // Save a pointer to where we pushed the arguments. This will be
+ // passed as the const PropertyAccessorInfo& to the C++ callback.
+ __ push(esp);
__ push(name()); // name
__ mov(ebx, esp); // esp points to reference to name (handler).
@@ -1460,18 +1462,19 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_address,
ApiParameterOperand(2),
kStackSpace,
- 7);
+ Operand(ebp, 7 * kPointerSize),
+ NULL);
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ LoadObject(eax, value);
__ ret(0);
}
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
+void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
Handle<JSObject> object,
Handle<JSObject> interceptor_holder,
@@ -2394,7 +2397,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
__ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also
// discards NaN.
@@ -2444,7 +2447,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Return a new heap number.
__ AllocateHeapNumber(eax, ebx, edx, &slow);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(2 * kPointerSize);
// Return the argument (when it's an already round heap number).
@@ -2623,7 +2626,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
// esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains a
// duplicate of the return address and will be overwritten.
- GenerateFastApiCall(masm(), optimization, argc);
+ GenerateFastApiCall(masm(), optimization, argc, false);
__ bind(&miss);
__ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
@@ -3111,18 +3114,14 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(edx);
+ __ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index e6ff2daa62..06cbf2e112 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -102,9 +102,8 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
JSObject* holder) {
- if (object->IsJSObject()) {
- return GetCodeCacheForObject(JSObject::cast(object), holder);
- }
+ if (object->IsJSObject()) return OWN_MAP;
+
// If the object is a value, we use the prototype map for the cache.
ASSERT(object->IsString() || object->IsSymbol() ||
object->IsNumber() || object->IsBoolean());
@@ -112,23 +111,6 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
}
-InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
- JSObject* holder) {
- // Fast-properties and global objects store stubs in their own maps.
- // Slow properties objects use prototype's map (unless the property is its own
- // when holder == object). It works because slow properties objects having
- // the same prototype (or a prototype with the same map) and not having
- // the property are interchangeable for such a stub.
- if (holder != object &&
- !object->HasFastProperties() &&
- !object->IsJSGlobalProxy() &&
- !object->IsJSGlobalObject()) {
- return PROTOTYPE_MAP;
- }
- return OWN_MAP;
-}
-
-
JSObject* IC::GetCodeCacheHolder(Isolate* isolate,
Object* object,
InlineCacheHolderFlag holder) {
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 55187514f9..55d7ba936f 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -71,19 +71,16 @@ const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
void IC::TraceIC(const char* type,
- Handle<Object> name,
- State old_state,
- Code* new_target) {
+ Handle<Object> name) {
if (FLAG_trace_ic) {
- Object* undef = new_target->GetHeap()->undefined_value();
- State new_state = StateFrom(new_target, undef, undef);
- PrintF("[%s in ", type);
- Isolate* isolate = new_target->GetIsolate();
- StackFrameIterator it(isolate);
+ Code* new_target = raw_target();
+ State new_state = new_target->ic_state();
+ PrintF("[%s%s in ", new_target->is_keyed_stub() ? "Keyed" : "", type);
+ StackFrameIterator it(isolate());
while (it.frame()->fp() != this->fp()) it.Advance();
StackFrame* raw_frame = it.frame();
if (raw_frame->is_internal()) {
- Code* apply_builtin = isolate->builtins()->builtin(
+ Code* apply_builtin = isolate()->builtins()->builtin(
Builtins::kFunctionApply);
if (raw_frame->unchecked_code() == apply_builtin) {
PrintF("apply from ");
@@ -91,12 +88,12 @@ void IC::TraceIC(const char* type,
raw_frame = it.frame();
}
}
- JavaScriptFrame::PrintTop(isolate, stdout, false, true);
- Code::ExtraICState state = new_target->extra_ic_state();
+ JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ Code::ExtraICState extra_state = new_target->extra_ic_state();
const char* modifier =
- GetTransitionMarkModifier(Code::GetKeyedAccessStoreMode(state));
+ GetTransitionMarkModifier(Code::GetKeyedAccessStoreMode(extra_state));
PrintF(" (%c->%c%s)",
- TransitionMarkFromState(old_state),
+ TransitionMarkFromState(state()),
TransitionMarkFromState(new_state),
modifier);
name->Print();
@@ -117,10 +114,12 @@ void IC::TraceIC(const char* type,
#define TRACE_GENERIC_IC(isolate, type, reason)
#endif // DEBUG
-#define TRACE_IC(type, name, old_state, new_target) \
- ASSERT((TraceIC(type, name, old_state, new_target), true))
+#define TRACE_IC(type, name) \
+ ASSERT((TraceIC(type, name), true))
-IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
+IC::IC(FrameDepth depth, Isolate* isolate)
+ : isolate_(isolate),
+ target_set_(false) {
// To improve the performance of the (much used) IC code, we unfold a few
// levels of the stack frame iteration code. This yields a ~35% speedup when
// running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
@@ -145,6 +144,8 @@ IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
#endif
fp_ = fp;
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
+ target_ = handle(raw_target(), isolate);
+ state_ = target_->ic_state();
}
@@ -179,32 +180,130 @@ Address IC::OriginalCodeAddress() const {
#endif
-static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
- Object* receiver,
- Object* name) {
- if (target->is_keyed_load_stub() ||
- target->is_keyed_call_stub() ||
- target->is_keyed_store_stub()) {
+static bool HasInterceptorGetter(JSObject* object) {
+ return !object->GetNamedInterceptor()->getter()->IsUndefined();
+}
+
+
+static bool HasInterceptorSetter(JSObject* object) {
+ return !object->GetNamedInterceptor()->setter()->IsUndefined();
+}
+
+
+static void LookupForRead(Handle<Object> object,
+ Handle<String> name,
+ LookupResult* lookup) {
+ // Skip all the objects with named interceptors but
+ // without an actual getter.
+ while (true) {
+ object->Lookup(*name, lookup);
+ // Besides the normal bail-out conditions (property not found, or found but
+ // not on an interceptor), also bail out if the lookup is not cacheable: we
+ // won't be able to IC it anyway, and regular lookup should work fine.
+ if (!lookup->IsInterceptor() || !lookup->IsCacheable()) {
+ return;
+ }
+
+ Handle<JSObject> holder(lookup->holder(), lookup->isolate());
+ if (HasInterceptorGetter(*holder)) {
+ return;
+ }
+
+ holder->LocalLookupRealNamedProperty(*name, lookup);
+ if (lookup->IsFound()) {
+ ASSERT(!lookup->IsInterceptor());
+ return;
+ }
+
+ Handle<Object> proto(holder->GetPrototype(), lookup->isolate());
+ if (proto->IsNull()) {
+ ASSERT(!lookup->IsFound());
+ return;
+ }
+
+ object = proto;
+ }
+}
+
+
+bool CallIC::TryUpdateExtraICState(LookupResult* lookup,
+ Handle<Object> object) {
+ if (!lookup->IsConstantFunction()) return false;
+ JSFunction* function = lookup->GetConstantFunction();
+ if (!function->shared()->HasBuiltinFunctionId()) return false;
+
+ // Fetch the arguments passed to the called function.
+ const int argc = target()->arguments_count();
+ Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
+ Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
+ Arguments args(argc + 1,
+ &Memory::Object_at(fp +
+ StandardFrameConstants::kCallerSPOffset +
+ argc * kPointerSize));
+ switch (function->shared()->builtin_function_id()) {
+ case kStringCharCodeAt:
+ case kStringCharAt:
+ if (object->IsString()) {
+ String* string = String::cast(*object);
+ // Check there's the right string value or wrapper in the receiver slot.
+ ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
+ // If we're in the default (fastest) state and the index is
+ // out of bounds, update the state to record this fact.
+ if (StringStubState::decode(extra_ic_state()) == DEFAULT_STRING_STUB &&
+ argc >= 1 && args[1]->IsNumber()) {
+ double index = DoubleToInteger(args.number_at(1));
+ if (index < 0 || index >= string->length()) {
+ extra_ic_state_ =
+ StringStubState::update(extra_ic_state(),
+ STRING_INDEX_OUT_OF_BOUNDS);
+ return true;
+ }
+ }
+ }
+ break;
+ default:
+ return false;
+ }
+ return false;
+}
+
+
+bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
+ Handle<String> name) {
+ DisallowHeapAllocation no_gc;
+
+ if (target()->is_call_stub()) {
+ LookupResult lookup(isolate());
+ LookupForRead(receiver, name, &lookup);
+ if (static_cast<CallIC*>(this)->TryUpdateExtraICState(&lookup, receiver)) {
+ return true;
+ }
+ }
+
+ if (target()->is_keyed_stub()) {
// Determine whether the failure is due to a name failure.
if (!name->IsName()) return false;
- Name* stub_name = target->FindFirstName();
- if (Name::cast(name) != stub_name) return false;
+ Name* stub_name = target()->FindFirstName();
+ if (*name != stub_name) return false;
}
InlineCacheHolderFlag cache_holder =
- Code::ExtractCacheHolderFromFlags(target->flags());
+ Code::ExtractCacheHolderFromFlags(target()->flags());
- Isolate* isolate = target->GetIsolate();
- if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
- // The stub was generated for JSObject but called for non-JSObject.
- // IC::GetCodeCacheHolder is not applicable.
- return false;
- } else if (cache_holder == PROTOTYPE_MAP &&
- receiver->GetPrototype(isolate)->IsNull()) {
- // IC::GetCodeCacheHolder is not applicable.
- return false;
+ switch (cache_holder) {
+ case OWN_MAP:
+ // The stub was generated for JSObject but called for non-JSObject.
+ // IC::GetCodeCacheHolder is not applicable.
+ if (!receiver->IsJSObject()) return false;
+ break;
+ case PROTOTYPE_MAP:
+ // IC::GetCodeCacheHolder is not applicable.
+ if (receiver->GetPrototype(isolate())->IsNull()) return false;
+ break;
}
- Map* map = IC::GetCodeCacheHolder(isolate, receiver, cache_holder)->map();
+
+ Handle<Map> map(
+ IC::GetCodeCacheHolder(isolate(), *receiver, cache_holder)->map());
// Decide whether the inline cache failed because of changes to the
// receiver itself or changes to one of its prototypes.
@@ -214,20 +313,11 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
// the receiver map's code cache. Therefore, if the current target
// is in the receiver map's code cache, the inline cache failed due
// to prototype check failure.
- int index = map->IndexInCodeCache(name, target);
+ int index = map->IndexInCodeCache(*name, *target());
if (index >= 0) {
- map->RemoveFromCodeCache(String::cast(name), target, index);
- // For loads and stores, handlers are stored in addition to the ICs on the
- // map. Remove those, too.
- if ((target->is_load_stub() || target->is_keyed_load_stub() ||
- target->is_store_stub() || target->is_keyed_store_stub()) &&
- target->type() != Code::NORMAL) {
- Code* handler = target->FindFirstCode();
- index = map->IndexInCodeCache(name, handler);
- if (index >= 0) {
- map->RemoveFromCodeCache(String::cast(name), handler, index);
- }
- }
+ map->RemoveFromCodeCache(*name, *target(), index);
+ // Handlers are stored in addition to the ICs on the map. Remove those, too.
+ TryRemoveInvalidHandlers(map, name);
return true;
}
@@ -240,8 +330,8 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
// If the IC is shared between multiple receivers (slow dictionary mode), then
// the map cannot be deprecated and the stub invalidated.
if (cache_holder == OWN_MAP) {
- Map* old_map = target->FindFirstMap();
- if (old_map == map) return true;
+ Map* old_map = target()->FindFirstMap();
+ if (old_map == *map) return true;
if (old_map != NULL) {
if (old_map->is_deprecated()) return true;
if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
@@ -252,11 +342,9 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
}
if (receiver->IsGlobalObject()) {
- if (!name->IsName()) return false;
- Isolate* isolate = target->GetIsolate();
- LookupResult lookup(isolate);
- GlobalObject* global = GlobalObject::cast(receiver);
- global->LocalLookupRealNamedProperty(Name::cast(name), &lookup);
+ LookupResult lookup(isolate());
+ GlobalObject* global = GlobalObject::cast(*receiver);
+ global->LocalLookupRealNamedProperty(*name, &lookup);
if (!lookup.IsFound()) return false;
PropertyCell* cell = global->GetPropertyCell(&lookup);
return cell->type()->IsConstant();
@@ -266,21 +354,38 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
}
-IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
- IC::State state = target->ic_state();
+void IC::TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name) {
+ CodeHandleList handlers;
+ target()->FindHandlers(&handlers);
+ for (int i = 0; i < handlers.length(); i++) {
+ Handle<Code> handler = handlers.at(i);
+ int index = map->IndexInCodeCache(*name, *handler);
+ if (index >= 0) {
+ map->RemoveFromCodeCache(*name, *handler, index);
+ return;
+ }
+ }
+}
+
- if (state != MONOMORPHIC || !name->IsString()) return state;
- if (receiver->IsUndefined() || receiver->IsNull()) return state;
+void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
+ if (!name->IsString()) return;
+ if (state() != MONOMORPHIC) {
+ if (state() == POLYMORPHIC && receiver->IsHeapObject()) {
+ TryRemoveInvalidHandlers(
+ handle(Handle<HeapObject>::cast(receiver)->map()),
+ Handle<String>::cast(name));
+ }
+ return;
+ }
+ if (receiver->IsUndefined() || receiver->IsNull()) return;
- Code::Kind kind = target->kind();
// Remove the target from the code cache if it became invalid
// because of changes in the prototype chain to avoid hitting it
// again.
- // Call stubs handle this later to allow extra IC state
- // transitions.
- if (kind != Code::CALL_IC && kind != Code::KEYED_CALL_IC &&
- TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) {
- return MONOMORPHIC_PROTOTYPE_FAILURE;
+ if (TryRemoveInvalidPrototypeDependentStub(
+ receiver, Handle<String>::cast(name))) {
+ return MarkMonomorphicPrototypeFailure();
}
// The builtins object is special. It only changes when JavaScript
@@ -289,11 +394,7 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
// an inline cache miss for the builtins object after lazily loading
// JavaScript builtins, we return uninitialized as the state to
// force the inline cache back to monomorphic state.
- if (receiver->IsJSBuiltinsObject()) {
- return UNINITIALIZED;
- }
-
- return MONOMORPHIC;
+ if (receiver->IsJSBuiltinsObject()) state_ = UNINITIALIZED;
}
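
StateFrom used to compute a fresh state from the old target; the new
UpdateState mutates state_ in place instead. A compact model of the
transitions it performs, under our reading of the code above (the
name-is-string and undefined/null guards are folded away, and all names are
stand-ins, not V8 API):

    enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC,
                 MONOMORPHIC_PROTOTYPE_FAILURE, POLYMORPHIC, MEGAMORPHIC,
                 GENERIC, DEBUG_STUB };

    // POLYMORPHIC targets only shed stale handlers and keep their state;
    // a MONOMORPHIC target whose prototype chain changed is demoted so the
    // next update reinstalls it; a builtins receiver resets the IC.
    State NextState(State s, bool stale_prototype_stub, bool builtins_receiver) {
      if (s != MONOMORPHIC) return s;
      if (stale_prototype_stub) return MONOMORPHIC_PROTOTYPE_FAILURE;
      if (builtins_receiver) return UNINITIALIZED;
      return s;
    }
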
@@ -403,7 +504,7 @@ void IC::Clear(Isolate* isolate, Address address) {
void CallICBase::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+ if (IsCleared(target)) return;
bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
Code* code =
target->GetIsolate()->stub_cache()->FindCallInitialize(
@@ -415,35 +516,33 @@ void CallICBase::Clear(Address address, Code* target) {
void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+ if (IsCleared(target)) return;
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- SetTargetAtAddress(address, *initialize_stub(isolate));
+ SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
}
void LoadIC::Clear(Isolate* isolate, Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- SetTargetAtAddress(address, *initialize_stub(isolate));
+ if (IsCleared(target)) return;
+ SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
}
void StoreIC::Clear(Isolate* isolate, Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+ if (IsCleared(target)) return;
SetTargetAtAddress(address,
- (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? *initialize_stub_strict(isolate)
- : *initialize_stub(isolate));
+ *pre_monomorphic_stub(
+ isolate, Code::GetStrictMode(target->extra_ic_state())));
}
void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+ if (IsCleared(target)) return;
SetTargetAtAddress(address,
- (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? *initialize_stub_strict(isolate)
- : *initialize_stub(isolate));
+ *pre_monomorphic_stub(
+ isolate, Code::GetStrictMode(target->extra_ic_state())));
}
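
All four Clear methods now share the IsCleared guard and reset to a
pre-monomorphic stub rather than the initialize stub, so a cleared IC skips
the uninitialized step on its next miss. Our reading of the guard, expressed
over the State enum from the previous sketch (an inference from these call
sites; IsCleared itself is defined outside this hunk):

    // An IC counts as cleared once it is back in either entry state, which
    // makes Clear idempotent.
    bool IsCleared(State state) {
      return state == UNINITIALIZED || state == PREMONOMORPHIC;
    }
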
@@ -460,47 +559,6 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
}
-static bool HasInterceptorGetter(JSObject* object) {
- return !object->GetNamedInterceptor()->getter()->IsUndefined();
-}
-
-
-static void LookupForRead(Handle<Object> object,
- Handle<String> name,
- LookupResult* lookup) {
- // Skip all the objects with named interceptors, but
- // without actual getter.
- while (true) {
- object->Lookup(*name, lookup);
- // Besides normal conditions (property not found or it's not
- // an interceptor), bail out if lookup is not cacheable: we won't
- // be able to IC it anyway and regular lookup should work fine.
- if (!lookup->IsInterceptor() || !lookup->IsCacheable()) {
- return;
- }
-
- Handle<JSObject> holder(lookup->holder(), lookup->isolate());
- if (HasInterceptorGetter(*holder)) {
- return;
- }
-
- holder->LocalLookupRealNamedProperty(*name, lookup);
- if (lookup->IsFound()) {
- ASSERT(!lookup->IsInterceptor());
- return;
- }
-
- Handle<Object> proto(holder->GetPrototype(), lookup->isolate());
- if (proto->IsNull()) {
- ASSERT(!lookup->IsFound());
- return;
- }
-
- object = proto;
- }
-}
-
-
Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
Handle<Object> delegate = Execution::GetFunctionDelegate(isolate(), object);
@@ -545,16 +603,18 @@ void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
}
-MaybeObject* CallICBase::LoadFunction(State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
+static bool MigrateDeprecated(Handle<Object> object) {
+ if (!object->IsJSObject()) return false;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (!receiver->map()->is_deprecated()) return false;
+ JSObject::MigrateInstance(Handle<JSObject>::cast(object));
+ return true;
+}
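
MigrateDeprecated centralizes a pattern that was previously inlined at every
entry point: migrate a receiver whose map is deprecated, then skip IC
learning for that one miss so the obsolete map is never recorded in a stub.
A self-contained sketch of the pattern (plain structs standing in for
JSObject and its map):

    struct ObjectModel { bool is_js_object; bool map_deprecated; };

    // Returns true after migrating; callers then force use_ic to false for
    // this miss, exactly as the LoadFunction/Load/Store call sites do.
    bool MigrateDeprecatedModel(ObjectModel* o) {
      if (!o->is_js_object || !o->map_deprecated) return false;
      o->map_deprecated = false;  // stands in for JSObject::MigrateInstance
      return true;
    }
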
+
+
+MaybeObject* CallICBase::LoadFunction(Handle<Object> object,
Handle<String> name) {
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
- }
+ bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
@@ -590,9 +650,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
}
// Lookup is valid: Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, extra_ic_state, object, name);
- }
+ if (use_ic) UpdateCaches(&lookup, object, name);
// Get the property.
PropertyAttributes attr;
@@ -637,53 +695,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
}
-bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object,
- Code::ExtraICState* extra_ic_state) {
- ASSERT(kind_ == Code::CALL_IC);
- if (!lookup->IsConstantFunction()) return false;
- JSFunction* function = lookup->GetConstantFunction();
- if (!function->shared()->HasBuiltinFunctionId()) return false;
-
- // Fetch the arguments passed to the called function.
- const int argc = target()->arguments_count();
- Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
- Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- Arguments args(argc + 1,
- &Memory::Object_at(fp +
- StandardFrameConstants::kCallerSPOffset +
- argc * kPointerSize));
- switch (function->shared()->builtin_function_id()) {
- case kStringCharCodeAt:
- case kStringCharAt:
- if (object->IsString()) {
- String* string = String::cast(*object);
- // Check there's the right string value or wrapper in the receiver slot.
- ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
- // If we're in the default (fastest) state and the index is
- // out of bounds, update the state to record this fact.
- if (StringStubState::decode(*extra_ic_state) == DEFAULT_STRING_STUB &&
- argc >= 1 && args[1]->IsNumber()) {
- double index = DoubleToInteger(args.number_at(1));
- if (index < 0 || index >= string->length()) {
- *extra_ic_state =
- StringStubState::update(*extra_ic_state,
- STRING_INDEX_OUT_OF_BOUNDS);
- return true;
- }
- }
- }
- break;
- default:
- return false;
- }
- return false;
-}
-
-
Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_state,
Handle<Object> object,
Handle<String> name) {
int argc = target()->arguments_count();
@@ -692,7 +704,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
case FIELD: {
PropertyIndex index = lookup->GetFieldIndex();
return isolate()->stub_cache()->ComputeCallField(
- argc, kind_, extra_state, name, object, holder, index);
+ argc, kind_, extra_ic_state(), name, object, holder, index);
}
case CONSTANT: {
if (!lookup->IsConstantFunction()) return Handle<Code>::null();
@@ -701,7 +713,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
// that the code stub is in the stub cache.
Handle<JSFunction> function(lookup->GetConstantFunction(), isolate());
return isolate()->stub_cache()->ComputeCallConstant(
- argc, kind_, extra_state, name, object, holder, function);
+ argc, kind_, extra_ic_state(), name, object, holder, function);
}
case NORMAL: {
// If we return a null handle, the IC will not be patched.
@@ -715,7 +727,8 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
Handle<JSFunction> function(JSFunction::cast(cell->value()));
return isolate()->stub_cache()->ComputeCallGlobal(
- argc, kind_, extra_state, name, receiver, global, cell, function);
+ argc, kind_, extra_ic_state(), name,
+ receiver, global, cell, function);
} else {
// There is only one shared stub for calling normalized
// properties. It does not traverse the prototype chain, so the
@@ -723,117 +736,74 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
// applicable.
if (!holder.is_identical_to(receiver)) return Handle<Code>::null();
return isolate()->stub_cache()->ComputeCallNormal(
- argc, kind_, extra_state);
+ argc, kind_, extra_ic_state());
}
break;
}
case INTERCEPTOR:
ASSERT(HasInterceptorGetter(*holder));
return isolate()->stub_cache()->ComputeCallInterceptor(
- argc, kind_, extra_state, name, object, holder);
+ argc, kind_, extra_ic_state(), name, object, holder);
default:
return Handle<Code>::null();
}
}
+Handle<Code> CallICBase::megamorphic_stub() {
+ return isolate()->stub_cache()->ComputeCallMegamorphic(
+ target()->arguments_count(), kind_, extra_ic_state());
+}
+
+
+Handle<Code> CallICBase::pre_monomorphic_stub() {
+ return isolate()->stub_cache()->ComputeCallPreMonomorphic(
+ target()->arguments_count(), kind_, extra_ic_state());
+}
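
With the megamorphic and pre-monomorphic stubs behind accessors, UpdateCaches
below shrinks to a two-way choice. The pre-monomorphic detour exists so a
call site must miss twice before the IC commits to a monomorphic stub;
sketched over the State enum from earlier:

    // One extra miss before going monomorphic keeps one-shot call sites
    // from compiling and caching stubs they will never reuse.
    State AfterMiss(State state) {
      return state == UNINITIALIZED ? PREMONOMORPHIC : MONOMORPHIC;
    }
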
+
+
void CallICBase::UpdateCaches(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
// Bail out if we didn't find a result.
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
// Compute the number of arguments.
- int argc = target()->arguments_count();
Handle<Code> code;
- if (state == UNINITIALIZED) {
- // This is the first time we execute this inline cache.
- // Set the target to the pre monomorphic stub to delay
- // setting the monomorphic state.
- code = isolate()->stub_cache()->ComputeCallPreMonomorphic(
- argc, kind_, extra_ic_state);
- } else if (state == MONOMORPHIC) {
- if (kind_ == Code::CALL_IC &&
- TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
- } else if (TryRemoveInvalidPrototypeDependentStub(target(),
- *object,
- *name)) {
- state = MONOMORPHIC_PROTOTYPE_FAILURE;
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
- } else {
- code = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, kind_, extra_ic_state);
- }
- } else {
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
- }
+ code = state() == UNINITIALIZED
+ ? pre_monomorphic_stub()
+ : ComputeMonomorphicStub(lookup, object, name);
// If there's no appropriate stub we simply avoid updating the caches.
+  // TODO(verwaest): Install a slow fallback in this case so the IC keeps
+  // learning and Crankshaft code is not deoptimized.
if (code.is_null()) return;
- // Patch the call site depending on the state of the cache.
- switch (state) {
- case UNINITIALIZED:
- case MONOMORPHIC_PROTOTYPE_FAILURE:
- case PREMONOMORPHIC:
- case MONOMORPHIC:
- set_target(*code);
- break;
- case MEGAMORPHIC: {
- // Cache code holding map should be consistent with
- // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
- Handle<JSObject> cache_object = object->IsJSObject()
- ? Handle<JSObject>::cast(object)
- : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())),
- isolate());
- // Update the stub cache.
- UpdateMegamorphicCache(cache_object->map(), *name, *code);
- break;
- }
- case DEBUG_STUB:
- break;
- case POLYMORPHIC:
- case GENERIC:
- UNREACHABLE();
- break;
- }
+ Handle<JSObject> cache_object = object->IsJSObject()
+ ? Handle<JSObject>::cast(object)
+ : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())),
+ isolate());
- TRACE_IC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
- name, state, target());
+ PatchCache(cache_object, name, code);
+ TRACE_IC("CallIC", name);
}
-MaybeObject* KeyedCallIC::LoadFunction(State state,
- Handle<Object> object,
+MaybeObject* KeyedCallIC::LoadFunction(Handle<Object> object,
Handle<Object> key) {
if (key->IsInternalizedString()) {
- return CallICBase::LoadFunction(state,
- Code::kNoExtraICState,
- object,
- Handle<String>::cast(key));
- }
-
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
+ return CallICBase::LoadFunction(object, Handle<String>::cast(key));
}
if (object->IsUndefined() || object->IsNull()) {
return TypeError("non_object_property_call", object, key);
}
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
- ASSERT(!(use_ic && object->IsJSGlobalProxy()));
+ bool use_ic = MigrateDeprecated(object)
+ ? false : FLAG_use_ic && !object->IsAccessCheckNeeded();
- if (use_ic && state != MEGAMORPHIC) {
+ if (use_ic && state() != MEGAMORPHIC) {
+ ASSERT(!object->IsJSGlobalProxy());
int argc = target()->arguments_count();
Handle<Code> stub = isolate()->stub_cache()->ComputeCallMegamorphic(
argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
@@ -846,7 +816,7 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
}
ASSERT(!stub.is_null());
set_target(*stub);
- TRACE_IC("KeyedCallIC", key, state, target());
+ TRACE_IC("CallIC", key);
}
Handle<Object> result = GetProperty(isolate(), object, key);
@@ -865,8 +835,7 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
}
-MaybeObject* LoadIC::Load(State state,
- Handle<Object> object,
+MaybeObject* LoadIC::Load(Handle<Object> object,
Handle<String> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
@@ -879,32 +848,27 @@ MaybeObject* LoadIC::Load(State state,
// string wrapper objects. The length property of string wrapper
// objects is read-only and therefore always returns the length of
// the underlying string value. See ECMA-262 15.5.5.1.
- if ((object->IsString() || object->IsStringWrapper()) &&
+ if (object->IsStringWrapper() &&
name->Equals(isolate()->heap()->length_string())) {
Handle<Code> stub;
- if (state == UNINITIALIZED) {
+ if (state() == UNINITIALIZED) {
stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
- StringLengthStub string_length_stub(kind(), !object->IsString());
+ } else if (state() == PREMONOMORPHIC || state() == MONOMORPHIC) {
+ StringLengthStub string_length_stub(kind());
stub = string_length_stub.GetCode(isolate());
- } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- StringLengthStub string_length_stub(kind(), true);
- stub = string_length_stub.GetCode(isolate());
- } else if (state != MEGAMORPHIC) {
- ASSERT(state != GENERIC);
+ } else if (state() != MEGAMORPHIC) {
+ ASSERT(state() != GENERIC);
stub = megamorphic_stub();
}
if (!stub.is_null()) {
set_target(*stub);
#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n");
#endif
}
// Get the string if we have a string wrapper object.
- Handle<Object> string = object->IsJSValue()
- ? Handle<Object>(Handle<JSValue>::cast(object)->value(), isolate())
- : object;
- return Smi::FromInt(String::cast(*string)->length());
+ String* string = String::cast(JSValue::cast(*object)->value());
+ return Smi::FromInt(string->length());
}
// Use specialized code for getting prototype of functions.
@@ -912,13 +876,13 @@ MaybeObject* LoadIC::Load(State state,
name->Equals(isolate()->heap()->prototype_string()) &&
Handle<JSFunction>::cast(object)->should_have_prototype()) {
Handle<Code> stub;
- if (state == UNINITIALIZED) {
+ if (state() == UNINITIALIZED) {
stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
+ } else if (state() == PREMONOMORPHIC) {
FunctionPrototypeStub function_prototype_stub(kind());
stub = function_prototype_stub.GetCode(isolate());
- } else if (state != MEGAMORPHIC) {
- ASSERT(state != GENERIC);
+ } else if (state() != MEGAMORPHIC) {
+ ASSERT(state() != GENERIC);
stub = megamorphic_stub();
}
if (!stub.is_null()) {
@@ -940,12 +904,7 @@ MaybeObject* LoadIC::Load(State state,
return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
}
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
- }
+ bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
// Named lookup in the object.
LookupResult lookup(isolate());
@@ -960,24 +919,20 @@ MaybeObject* LoadIC::Load(State state,
}
// Update inline cache and stub cache.
- if (FLAG_use_ic) UpdateCaches(&lookup, state, object, name);
+ if (use_ic) UpdateCaches(&lookup, object, name);
PropertyAttributes attr;
- if (lookup.IsInterceptor() || lookup.IsHandler()) {
- // Get the property.
- Handle<Object> result =
- Object::GetProperty(object, object, &lookup, name, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- // If the property is not present, check if we need to throw an
- // exception.
- if (attr == ABSENT && IsUndeclaredGlobal(object)) {
- return ReferenceError("not_defined", name);
- }
- return *result;
- }
-
// Get the property.
- return Object::GetPropertyOrFail(object, object, &lookup, name, &attr);
+ Handle<Object> result =
+ Object::GetProperty(object, object, &lookup, name, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ // If the property is not present, check if we need to throw an
+ // exception.
+ if ((lookup.IsInterceptor() || lookup.IsHandler()) &&
+ attr == ABSENT && IsUndeclaredGlobal(object)) {
+ return ReferenceError("not_defined", name);
+ }
+ return *result;
}
@@ -995,16 +950,10 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
}
-bool IC::UpdatePolymorphicIC(State state,
- Handle<HeapObject> receiver,
+bool IC::UpdatePolymorphicIC(Handle<HeapObject> receiver,
Handle<String> name,
- Handle<Code> code,
- StrictModeFlag strict_mode) {
- if (code->type() == Code::NORMAL) return false;
- if (target()->ic_state() == MONOMORPHIC &&
- target()->type() == Code::NORMAL) {
- return false;
- }
+ Handle<Code> code) {
+ if (!code->is_handler()) return false;
MapHandleList receiver_maps;
CodeHandleList handlers;
@@ -1033,13 +982,11 @@ bool IC::UpdatePolymorphicIC(State state,
}
if (number_of_valid_maps >= 4) return false;
+ if (number_of_maps == 0) return false;
- // Only allow 0 maps in case target() was reset to UNINITIALIZED by the GC.
- // In that case, allow the IC to go back monomorphic.
- if (number_of_maps == 0 && target()->ic_state() != UNINITIALIZED) {
+ if (!target()->FindHandlers(&handlers, receiver_maps.length())) {
return false;
}
- target()->FindAllCode(&handlers, receiver_maps.length());
}
number_of_valid_maps++;
@@ -1050,73 +997,19 @@ bool IC::UpdatePolymorphicIC(State state,
handlers.Add(code);
}
- Handle<Code> ic = ComputePolymorphicIC(
- &receiver_maps, &handlers, number_of_valid_maps, name, strict_mode);
+ Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
+ &receiver_maps, &handlers, number_of_valid_maps, name, strict_mode());
set_target(*ic);
return true;
}
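
The number_of_valid_maps >= 4 check above is the polymorphism cap: a
polymorphic IC carries at most four (map, handler) pairs before the call
site has to spill into the megamorphic stub cache. The growth rule in
isolation (the constant matches the check above; the container is a
stand-in for the map/handler lists):

    #include <vector>

    const int kMaxPolymorphicMaps = 4;

    // Record a handler for a newly seen map, or report that the IC is full
    // and the caller should go megamorphic instead.
    bool TryAddHandler(std::vector<int>* seen_maps, int new_map) {
      if (static_cast<int>(seen_maps->size()) >= kMaxPolymorphicMaps)
        return false;
      seen_maps->push_back(new_map);
      return true;
    }
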
-Handle<Code> LoadIC::ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
- return isolate()->stub_cache()->ComputePolymorphicLoadIC(
- receiver_maps, handlers, number_of_valid_maps, name);
-}
-
-
-Handle<Code> StoreIC::ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
- return isolate()->stub_cache()->ComputePolymorphicStoreIC(
- receiver_maps, handlers, number_of_valid_maps, name, strict_mode);
-}
-
-
-void LoadIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode) {
- if (handler->is_load_stub()) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicLoadIC(
- receiver, handler, name);
- set_target(*ic);
-}
-
-
-void KeyedLoadIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode) {
- if (handler->is_keyed_load_stub()) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedLoadIC(
- receiver, handler, name);
- set_target(*ic);
-}
-
-
-void StoreIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode) {
- if (handler->is_store_stub()) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicStoreIC(
- receiver, handler, name, strict_mode);
- set_target(*ic);
-}
-
-
-void KeyedStoreIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode) {
- if (handler->is_keyed_store_stub()) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedStoreIC(
- receiver, handler, name, strict_mode);
+void IC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name) {
+ if (!handler->is_handler()) return set_target(*handler);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
+ receiver, handler, name, strict_mode());
set_target(*ic);
}
@@ -1127,7 +1020,7 @@ void IC::CopyICToMegamorphicCache(Handle<String> name) {
{
DisallowHeapAllocation no_gc;
target()->FindAllMaps(&receiver_maps);
- target()->FindAllCode(&handlers, receiver_maps.length());
+ if (!target()->FindHandlers(&handlers, receiver_maps.length())) return;
}
for (int i = 0; i < receiver_maps.length(); i++) {
UpdateMegamorphicCache(*receiver_maps.at(i), *name, *handlers.at(i));
@@ -1151,69 +1044,58 @@ bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) {
}
-// Since GC may have been invoked, by the time PatchCache is called, |state| is
-// not necessarily equal to target()->state().
-void IC::PatchCache(State state,
- StrictModeFlag strict_mode,
- Handle<HeapObject> receiver,
+void IC::PatchCache(Handle<HeapObject> receiver,
Handle<String> name,
Handle<Code> code) {
- switch (state) {
+ switch (state()) {
case UNINITIALIZED:
case PREMONOMORPHIC:
case MONOMORPHIC_PROTOTYPE_FAILURE:
- UpdateMonomorphicIC(receiver, code, name, strict_mode);
+ UpdateMonomorphicIC(receiver, code, name);
break;
case MONOMORPHIC:
- // Only move to megamorphic if the target changes.
- if (target() != *code) {
- if (target()->is_load_stub() || target()->is_store_stub()) {
- bool is_same_handler = false;
- {
- DisallowHeapAllocation no_allocation;
- Code* old_handler = target()->FindFirstCode();
- is_same_handler = old_handler == *code;
- }
- if (is_same_handler
- && IsTransitionedMapOfMonomorphicTarget(receiver->map())) {
- UpdateMonomorphicIC(receiver, code, name, strict_mode);
- break;
- }
- if (UpdatePolymorphicIC(state, receiver, name, code, strict_mode)) {
- break;
- }
-
- if (target()->type() != Code::NORMAL) {
- CopyICToMegamorphicCache(name);
- }
+ // For now, call stubs are allowed to rewrite to the same stub. This
+  // happens, e.g., when the field does not contain a function.
+ ASSERT(target()->is_call_stub() ||
+ target()->is_keyed_call_stub() ||
+ !target().is_identical_to(code));
+ if (!target()->is_keyed_stub()) {
+ bool is_same_handler = false;
+ {
+ DisallowHeapAllocation no_allocation;
+ Code* old_handler = target()->FindFirstHandler();
+ is_same_handler = old_handler == *code;
+ }
+ if (is_same_handler
+ && IsTransitionedMapOfMonomorphicTarget(receiver->map())) {
+ UpdateMonomorphicIC(receiver, code, name);
+ break;
+ }
+ if (UpdatePolymorphicIC(receiver, name, code)) {
+ break;
}
- UpdateMegamorphicCache(receiver->map(), *name, *code);
- set_target((strict_mode == kStrictMode)
- ? *megamorphic_stub_strict()
- : *megamorphic_stub());
+ CopyICToMegamorphicCache(name);
}
+
+ UpdateMegamorphicCache(receiver->map(), *name, *code);
+ set_target(*megamorphic_stub());
break;
case MEGAMORPHIC:
- // Update the stub cache.
UpdateMegamorphicCache(receiver->map(), *name, *code);
break;
case POLYMORPHIC:
- if (target()->is_load_stub() || target()->is_store_stub()) {
- if (UpdatePolymorphicIC(state, receiver, name, code, strict_mode)) {
+ if (target()->is_keyed_stub()) {
+ // When trying to patch a polymorphic keyed stub with anything other
+ // than another polymorphic stub, go generic.
+ set_target(*generic_stub());
+ } else {
+ if (UpdatePolymorphicIC(receiver, name, code)) {
break;
}
CopyICToMegamorphicCache(name);
UpdateMegamorphicCache(receiver->map(), *name, *code);
- set_target((strict_mode == kStrictMode)
- ? *megamorphic_stub_strict()
- : *megamorphic_stub());
- } else {
- // When trying to patch a polymorphic keyed load/store element stub
- // with anything other than another polymorphic stub, go generic.
- set_target((strict_mode == kStrictMode)
- ? *generic_stub_strict()
- : *generic_stub());
+ set_target(*megamorphic_stub());
}
break;
case DEBUG_STUB:
@@ -1225,52 +1107,29 @@ void IC::PatchCache(State state,
}
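
Read as a whole, the rewritten PatchCache is a dispatch on the current state,
with keyed stubs taking shortcuts because they cannot be polymorphic over
property names. A condensed model of that dispatch, reusing the State enum
from the earlier sketch (the same-handler/transitioned-map fast path and the
DEBUG_STUB case are omitted for brevity; the Action names are ours):

    enum Action { INSTALL_MONOMORPHIC, GROW_POLYMORPHIC,
                  GO_MEGAMORPHIC, GO_GENERIC };

    Action NextAction(State state, bool keyed_stub, bool polymorphic_fits) {
      switch (state) {
        case UNINITIALIZED:
        case PREMONOMORPHIC:
        case MONOMORPHIC_PROTOTYPE_FAILURE:
          return INSTALL_MONOMORPHIC;
        case MONOMORPHIC:
          if (keyed_stub) return GO_MEGAMORPHIC;
          return polymorphic_fits ? GROW_POLYMORPHIC : GO_MEGAMORPHIC;
        case POLYMORPHIC:
          if (keyed_stub) return GO_GENERIC;
          return polymorphic_fits ? GROW_POLYMORPHIC : GO_MEGAMORPHIC;
        default:
          return GO_MEGAMORPHIC;  // MEGAMORPHIC: stub cache update only
      }
    }
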
-static void GetReceiverMapsForStub(Handle<Code> stub,
- MapHandleList* result) {
- ASSERT(stub->is_inline_cache_stub());
- switch (stub->ic_state()) {
- case MONOMORPHIC: {
- Map* map = stub->FindFirstMap();
- if (map != NULL) {
- result->Add(Handle<Map>(map));
- }
- break;
- }
- case POLYMORPHIC: {
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Handle<Object> object(info->target_object(), stub->GetIsolate());
- if (object->IsString()) break;
- ASSERT(object->IsMap());
- AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object));
- }
- break;
- }
- case MEGAMORPHIC:
- break;
- case UNINITIALIZED:
- case PREMONOMORPHIC:
- case MONOMORPHIC_PROTOTYPE_FAILURE:
- case GENERIC:
- case DEBUG_STUB:
- UNREACHABLE();
- break;
+Handle<Code> LoadIC::SimpleFieldLoad(int offset,
+ bool inobject,
+ Representation representation) {
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(inobject, offset, representation);
+ return stub.GetCode(isolate());
+ } else {
+ KeyedLoadFieldStub stub(inobject, offset, representation);
+ return stub.GetCode(isolate());
}
}
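
SimpleFieldLoad just picks between the plain and keyed field-load stubs;
either way the generated handler reduces to a single offset read, from the
object itself or from its out-of-object properties backing store. An
illustrative reduction (layout and types are stand-ins, not V8's object
model):

    #include <cstdint>

    // The whole handler, in spirit: one load at a precomputed offset.
    intptr_t LoadFieldModel(const intptr_t* object,
                            const intptr_t* properties,
                            int offset, bool inobject) {
      return inobject ? object[offset] : properties[offset];
    }
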
-
void LoadIC::UpdateCaches(LookupResult* lookup,
- State state,
Handle<Object> object,
Handle<String> name) {
+ // TODO(verwaest): It would be nice to support loading fields from smis as
+ // well. For now just fail to update the cache.
if (!object->IsHeapObject()) return;
Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
Handle<Code> code;
- if (state == UNINITIALIZED) {
+ if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
@@ -1278,17 +1137,25 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
} else if (!lookup->IsCacheable()) {
// Bail out if the result is not cacheable.
code = slow_stub();
+ } else if (object->IsString() &&
+ name->Equals(isolate()->heap()->length_string())) {
+ int length_index = String::kLengthOffset / kPointerSize;
+ code = SimpleFieldLoad(length_index);
} else if (!object->IsJSObject()) {
// TODO(jkummerow): It would be nice to support non-JSObjects in
// ComputeLoadHandler, then we wouldn't need to go generic here.
code = slow_stub();
+ } else if (!lookup->IsProperty()) {
+ code = kind() == Code::LOAD_IC
+ ? isolate()->stub_cache()->ComputeLoadNonexistent(
+ name, Handle<JSObject>::cast(receiver))
+ : slow_stub();
} else {
- code = ComputeLoadHandler(lookup, Handle<JSObject>::cast(receiver), name);
- if (code.is_null()) code = slow_stub();
+ code = ComputeHandler(lookup, Handle<JSObject>::cast(receiver), name);
}
- PatchCache(state, kNonStrictMode, receiver, name, code);
- TRACE_IC("LoadIC", name, state, target());
+ PatchCache(receiver, name, code);
+ TRACE_IC("LoadIC", name);
}
@@ -1299,34 +1166,56 @@ void IC::UpdateMegamorphicCache(Map* map, Name* name, Code* code) {
}
-Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name) {
- if (!lookup->IsProperty()) {
- // Nonexistent property. The result is undefined.
- return isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver);
+Handle<Code> IC::ComputeHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value) {
+ Handle<Code> code = isolate()->stub_cache()->FindHandler(
+ name, receiver, kind());
+ if (!code.is_null()) return code;
+
+ code = CompileHandler(lookup, receiver, name, value);
+
+ if (code->is_handler() && code->type() != Code::NORMAL) {
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
}
- // Compute monomorphic stub.
+ return code;
+}
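
ComputeHandler is the new shared probe-then-compile path for load and store
handlers: look up a previously compiled handler keyed by name and receiver,
compile on miss, and publish everything except Code::NORMAL stubs into the
map's code cache. The shape of that flow, modeled with a std::map and a
compile callback as stand-ins for the stub cache and the stub compilers:

    #include <map>
    #include <string>

    int ComputeHandlerModel(std::map<std::string, int>* cache,
                            const std::string& name,
                            int (*compile)(const std::string&),
                            bool cacheable) {
      auto it = cache->find(name);
      if (it != cache->end()) return it->second;  // FindHandler hit
      int code = compile(name);                   // CompileHandler
      if (cacheable) (*cache)[name] = code;       // UpdateMapCodeCache
      return code;
    }
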
+
+
+Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> unused) {
Handle<JSObject> holder(lookup->holder());
+ LoadStubCompiler compiler(isolate(), kind());
+
switch (lookup->type()) {
- case FIELD:
- return isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder,
- lookup->GetFieldIndex(), lookup->representation());
+ case FIELD: {
+ PropertyIndex field = lookup->GetFieldIndex();
+ if (receiver.is_identical_to(holder)) {
+ return SimpleFieldLoad(field.translate(holder),
+ field.is_inobject(holder),
+ lookup->representation());
+ }
+ return compiler.CompileLoadField(
+ receiver, holder, name, field, lookup->representation());
+ }
case CONSTANT: {
Handle<Object> constant(lookup->GetConstant(), isolate());
// TODO(2803): Don't compute a stub for cons strings because they cannot
// be embedded into code.
- if (constant->IsConsString()) return Handle<Code>::null();
- return isolate()->stub_cache()->ComputeLoadConstant(
- name, receiver, holder, constant);
+ if (constant->IsConsString()) break;
+ return compiler.CompileLoadConstant(receiver, holder, name, constant);
}
case NORMAL:
+ if (kind() != Code::LOAD_IC) break;
if (holder->IsGlobalObject()) {
Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
Handle<PropertyCell> cell(
global->GetPropertyCell(lookup), isolate());
+ // TODO(verwaest): Turn into a handler.
return isolate()->stub_cache()->ComputeLoadGlobal(
name, receiver, global, cell, lookup->IsDontDelete());
}
@@ -1335,16 +1224,25 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
// property must be found in the receiver for the stub to be
// applicable.
if (!holder.is_identical_to(receiver)) break;
- return isolate()->stub_cache()->ComputeLoadNormal(name, receiver);
+ return isolate()->builtins()->LoadIC_Normal();
case CALLBACKS: {
+ // Use simple field loads for some well-known callback properties.
+ int object_offset;
+ Handle<Map> map(receiver->map());
+ if (Accessors::IsJSObjectFieldAccessor(map, name, &object_offset)) {
+ PropertyIndex index =
+ PropertyIndex::NewHeaderIndex(object_offset / kPointerSize);
+ return compiler.CompileLoadField(
+ receiver, receiver, name, index, Representation::Tagged());
+ }
+
Handle<Object> callback(lookup->GetCallbackObject(), isolate());
if (callback->IsExecutableAccessorInfo()) {
Handle<ExecutableAccessorInfo> info =
Handle<ExecutableAccessorInfo>::cast(callback);
if (v8::ToCData<Address>(info->getter()) == 0) break;
if (!info->IsCompatibleReceiver(*receiver)) break;
- return isolate()->stub_cache()->ComputeLoadCallback(
- name, receiver, holder, info);
+ return compiler.CompileLoadCallback(receiver, holder, name, info);
} else if (callback->IsAccessorPair()) {
Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter(),
isolate());
@@ -1354,19 +1252,11 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
CallOptimization call_optimization(function);
if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver) &&
- FLAG_js_accessor_ics) {
- return isolate()->stub_cache()->ComputeLoadCallback(
- name, receiver, holder, call_optimization);
+ call_optimization.IsCompatibleReceiver(*receiver)) {
+ return compiler.CompileLoadCallback(
+ receiver, holder, name, call_optimization);
}
- return isolate()->stub_cache()->ComputeLoadViaGetter(
- name, receiver, holder, function);
- } else if (receiver->IsJSArray() &&
- name->Equals(isolate()->heap()->length_string())) {
- PropertyIndex lengthIndex =
- PropertyIndex::NewHeaderIndex(JSArray::kLengthOffset / kPointerSize);
- return isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder, lengthIndex, Representation::Tagged());
+ return compiler.CompileLoadViaGetter(receiver, holder, name, function);
}
// TODO(dcarney): Handle correctly.
if (callback->IsDeclaredAccessorInfo()) break;
@@ -1376,12 +1266,12 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
}
case INTERCEPTOR:
ASSERT(HasInterceptorGetter(*holder));
- return isolate()->stub_cache()->ComputeLoadInterceptor(
- name, receiver, holder);
+ return compiler.CompileLoadInterceptor(receiver, holder, name);
default:
break;
}
- return Handle<Code>::null();
+
+ return slow_stub();
}
@@ -1406,8 +1296,6 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
- State ic_state = target()->ic_state();
-
// Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
@@ -1418,17 +1306,16 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
Handle<Map> receiver_map(receiver->map(), isolate());
MapHandleList target_receiver_maps;
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) {
// Optimistically assume that ICs that haven't reached the MONOMORPHIC state
// yet will do so and stay there.
return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
}
- if (target() == *string_stub()) {
+ if (target().is_identical_to(string_stub())) {
target_receiver_maps.Add(isolate()->factory()->string_map());
} else {
- GetReceiverMapsForStub(Handle<Code>(target(), isolate()),
- &target_receiver_maps);
+ target()->FindAllMaps(&target_receiver_maps);
if (target_receiver_maps.length() == 0) {
return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
}
@@ -1441,14 +1328,14 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
// monomorphic. If this optimistic assumption is not true, the IC will
// miss again and it will become polymorphic and support both the
// untransitioned and transitioned maps.
- if (ic_state == MONOMORPHIC &&
+ if (state() == MONOMORPHIC &&
IsMoreGeneralElementsKindTransition(
target_receiver_maps.at(0)->elements_kind(),
receiver->GetElementsKind())) {
return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
}
- ASSERT(ic_state != GENERIC);
+ ASSERT(state() != GENERIC);
// Determine the list of receiver maps that this call site has seen,
// adding the map that was just encountered.
@@ -1471,132 +1358,69 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
}
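
The MONOMORPHIC early-out above leans on ElementsKind generalization: when
the one map seen so far differs from the current receiver map only by a more
general elements kind, the IC stays monomorphic on the general map instead
of going polymorphic. Modeled with a three-kind lattice (V8's real
ElementsKind set is larger; the ordering is the point):

    enum ElementsKindModel { SMI_ELEMENTS, DOUBLE_ELEMENTS, OBJECT_ELEMENTS };

    // A strictly more general kind subsumes the old one, so one stub keyed
    // on the general map can serve both shapes.
    bool StayMonomorphic(ElementsKindModel seen, ElementsKindModel current) {
      return current > seen;
    }
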
-MaybeObject* KeyedLoadIC::Load(State state,
- Handle<Object> object,
+MaybeObject* KeyedLoadIC::Load(Handle<Object> object,
Handle<Object> key,
ICMissMode miss_mode) {
+ if (MigrateDeprecated(object)) {
+ return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
+ }
+
+ MaybeObject* maybe_object = NULL;
+ Handle<Code> stub = generic_stub();
+
// Check for values that can be converted into an internalized string directly
// or are representable as a smi.
key = TryConvertKey(key, isolate());
if (key->IsInternalizedString()) {
- return LoadIC::Load(state, object, Handle<String>::cast(key));
- }
-
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
- ASSERT(!(use_ic && object->IsJSGlobalProxy()));
-
- if (use_ic) {
- Handle<Code> stub = generic_stub();
+ maybe_object = LoadIC::Load(object, Handle<String>::cast(key));
+ if (maybe_object->IsFailure()) return maybe_object;
+ } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
+ ASSERT(!object->IsJSGlobalProxy());
if (miss_mode != MISS_FORCE_GENERIC) {
if (object->IsString() && key->IsNumber()) {
- if (state == UNINITIALIZED) {
- stub = string_stub();
- }
+ if (state() == UNINITIALIZED) stub = string_stub();
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
-
if (receiver->elements()->map() ==
isolate()->heap()->non_strict_arguments_elements_map()) {
stub = non_strict_arguments_stub();
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
} else if (!key->ToSmi()->IsFailure() &&
- (target() != *non_strict_arguments_stub())) {
+ (!target().is_identical_to(non_strict_arguments_stub()))) {
stub = LoadElementStub(receiver);
}
}
- } else {
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "force generic");
+ }
+ }
+
+ if (!is_target_set()) {
+ if (*stub == *generic_stub()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
}
ASSERT(!stub.is_null());
set_target(*stub);
- TRACE_IC("KeyedLoadIC", key, state, target());
+ TRACE_IC("LoadIC", key);
}
-
+ if (maybe_object != NULL) return maybe_object;
return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
}
-Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name) {
- // Bail out if we didn't find a result.
- if (!lookup->IsProperty()) return Handle<Code>::null();
-
- // Compute a monomorphic stub.
- Handle<JSObject> holder(lookup->holder(), isolate());
- switch (lookup->type()) {
- case FIELD:
- return isolate()->stub_cache()->ComputeKeyedLoadField(
- name, receiver, holder,
- lookup->GetFieldIndex(), lookup->representation());
- case CONSTANT: {
- Handle<Object> constant(lookup->GetConstant(), isolate());
- // TODO(2803): Don't compute a stub for cons strings because they cannot
- // be embedded into code.
- if (constant->IsConsString()) return Handle<Code>::null();
- return isolate()->stub_cache()->ComputeKeyedLoadConstant(
- name, receiver, holder, constant);
- }
- case CALLBACKS: {
- Handle<Object> callback_object(lookup->GetCallbackObject(), isolate());
- // TODO(dcarney): Handle DeclaredAccessorInfo correctly.
- if (callback_object->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> callback =
- Handle<ExecutableAccessorInfo>::cast(callback_object);
- if (v8::ToCData<Address>(callback->getter()) == 0) break;
- if (!callback->IsCompatibleReceiver(*receiver)) break;
- return isolate()->stub_cache()->ComputeKeyedLoadCallback(
- name, receiver, holder, callback);
- } else if (callback_object->IsAccessorPair()) {
- Handle<Object> getter(
- Handle<AccessorPair>::cast(callback_object)->getter(),
- isolate());
- if (!getter->IsJSFunction()) break;
- if (holder->IsGlobalObject()) break;
- if (!holder->HasFastProperties()) break;
- Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
- CallOptimization call_optimization(function);
- if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver) &&
- FLAG_js_accessor_ics) {
- return isolate()->stub_cache()->ComputeKeyedLoadCallback(
- name, receiver, holder, call_optimization);
- }
- }
- break;
- }
- case INTERCEPTOR:
- ASSERT(HasInterceptorGetter(lookup->holder()));
- return isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
- name, receiver, holder);
- default:
- // Always rewrite to the generic case so that we do not
- // repeatedly try to rewrite.
- return generic_stub();
- }
- return Handle<Code>::null();
-}
-
-
static bool LookupForWrite(Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value,
LookupResult* lookup,
- IC::State* state) {
+ IC* ic) {
Handle<JSObject> holder = receiver;
receiver->Lookup(*name, lookup);
if (lookup->IsFound()) {
if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
if (lookup->holder() == *receiver) {
- if (lookup->IsInterceptor() &&
- receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
+ if (lookup->IsInterceptor() && !HasInterceptorSetter(*receiver)) {
receiver->LocalLookupRealNamedProperty(*name, lookup);
return lookup->IsFound() &&
!lookup->IsReadOnly() &&
@@ -1642,22 +1466,21 @@ static bool LookupForWrite(Handle<JSObject> receiver,
// entirely by the migration above.
receiver->map()->LookupTransition(*holder, *name, lookup);
if (!lookup->IsTransition()) return false;
- *state = MONOMORPHIC_PROTOTYPE_FAILURE;
+ ic->MarkMonomorphicPrototypeFailure();
}
return true;
}
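
When LookupForWrite finds the property only as a map transition, the store is
still cacheable, but the IC is flagged through MarkMonomorphicPrototypeFailure
so that PatchCache treats the next update as a fresh monomorphic install. The
tail of that logic, over the State enum from earlier:

    bool CacheableTransitionWrite(bool found_transition, State* state) {
      if (!found_transition) return false;
      *state = MONOMORPHIC_PROTOTYPE_FAILURE;  // forces re-installation
      return true;
    }
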
-MaybeObject* StoreIC::Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
+MaybeObject* StoreIC::Store(Handle<Object> object,
Handle<String> name,
Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode) {
- // Handle proxies.
- if (object->IsJSProxy()) {
- return JSReceiver::SetPropertyOrFail(
- Handle<JSReceiver>::cast(object), name, value, NONE, strict_mode);
+ if (MigrateDeprecated(object) || object->IsJSProxy()) {
+ Handle<Object> result = JSReceiver::SetProperty(
+ Handle<JSReceiver>::cast(object), name, value, NONE, strict_mode());
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
// If the object is undefined or null it's illegal to try to set any
@@ -1667,7 +1490,7 @@ MaybeObject* StoreIC::Store(State state,
}
// The length property of string values is read-only. Throw in strict mode.
- if (strict_mode == kStrictMode && object->IsString() &&
+ if (strict_mode() == kStrictMode && object->IsString() &&
name->Equals(isolate()->heap()->length_string())) {
return TypeError("strict_read_only_property", object, name);
}
@@ -1678,23 +1501,21 @@ MaybeObject* StoreIC::Store(State state,
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
-
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
Handle<Object> result =
- JSObject::SetElement(receiver, index, value, NONE, strict_mode);
+ JSObject::SetElement(receiver, index, value, NONE, strict_mode());
RETURN_IF_EMPTY_HANDLE(isolate(), result);
return *value;
}
// Observed objects are always modified through the runtime.
if (FLAG_harmony_observation && receiver->map()->is_observed()) {
- return JSReceiver::SetPropertyOrFail(
- receiver, name, value, NONE, strict_mode, store_mode);
+ Handle<Object> result = JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
// Use specialized code for setting the length of arrays with fast
@@ -1708,63 +1529,62 @@ MaybeObject* StoreIC::Store(State state,
receiver->HasFastProperties() &&
!receiver->map()->is_frozen()) {
Handle<Code> stub =
- StoreArrayLengthStub(kind(), strict_mode).GetCode(isolate());
+ StoreArrayLengthStub(kind(), strict_mode()).GetCode(isolate());
set_target(*stub);
- TRACE_IC("StoreIC", name, state, *stub);
- return JSReceiver::SetPropertyOrFail(
- receiver, name, value, NONE, strict_mode, store_mode);
+ TRACE_IC("StoreIC", name);
+ Handle<Object> result = JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
if (receiver->IsJSGlobalProxy()) {
if (FLAG_use_ic && kind() != Code::KEYED_STORE_IC) {
// Generate a generic stub that goes to the runtime when we see a global
// proxy as receiver.
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? global_proxy_stub_strict()
- : global_proxy_stub();
+ Handle<Code> stub = global_proxy_stub();
set_target(*stub);
- TRACE_IC("StoreIC", name, state, *stub);
+ TRACE_IC("StoreIC", name);
}
- return JSReceiver::SetPropertyOrFail(
- receiver, name, value, NONE, strict_mode, store_mode);
+ Handle<Object> result = JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
LookupResult lookup(isolate());
- bool can_store = LookupForWrite(receiver, name, value, &lookup, &state);
+ bool can_store = LookupForWrite(receiver, name, value, &lookup, this);
if (!can_store &&
- strict_mode == kStrictMode &&
+ strict_mode() == kStrictMode &&
!(lookup.IsProperty() && lookup.IsReadOnly()) &&
IsUndeclaredGlobal(object)) {
// Strict mode doesn't allow setting non-existent global property.
return ReferenceError("not_defined", name);
}
if (FLAG_use_ic) {
- if (state == UNINITIALIZED) {
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? pre_monomorphic_stub_strict()
- : pre_monomorphic_stub();
+ if (state() == UNINITIALIZED) {
+ Handle<Code> stub = pre_monomorphic_stub();
set_target(*stub);
- TRACE_IC("StoreIC", name, state, *stub);
+ TRACE_IC("StoreIC", name);
} else if (can_store) {
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ UpdateCaches(&lookup, receiver, name, value);
} else if (!name->IsCacheable(isolate()) ||
lookup.IsNormal() ||
(lookup.IsField() && lookup.CanHoldValue(value))) {
- Handle<Code> stub = (strict_mode == kStrictMode) ? generic_stub_strict()
- : generic_stub();
+ Handle<Code> stub = generic_stub();
set_target(*stub);
}
}
// Set the property.
- return JSReceiver::SetPropertyOrFail(
- receiver, name, value, NONE, strict_mode, store_mode);
+ Handle<Object> result = JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
void StoreIC::UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
@@ -1774,31 +1594,39 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
// These are not cacheable, so we never see such LookupResults here.
ASSERT(!lookup->IsHandler());
- Handle<Code> code = ComputeStoreMonomorphic(
- lookup, strict_mode, receiver, name, value);
- if (code.is_null()) {
- Handle<Code> stub = strict_mode == kStrictMode
- ? generic_stub_strict() : generic_stub();
- set_target(*stub);
- return;
- }
+ Handle<Code> code = ComputeHandler(lookup, receiver, name, value);
- PatchCache(state, strict_mode, receiver, name, code);
- TRACE_IC("StoreIC", name, state, target());
+ PatchCache(receiver, name, code);
+ TRACE_IC("StoreIC", name);
}
-Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value) {
+Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value) {
Handle<JSObject> holder(lookup->holder());
+ StoreStubCompiler compiler(isolate(), strict_mode(), kind());
switch (lookup->type()) {
case FIELD:
- return isolate()->stub_cache()->ComputeStoreField(
- name, receiver, lookup, strict_mode);
+ return compiler.CompileStoreField(receiver, lookup, name);
+ case TRANSITION: {
+ // Explicitly pass in the receiver map since LookupForWrite may have
+ // stored something else than the receiver in the holder.
+ Handle<Map> transition(
+ lookup->GetTransitionTarget(receiver->map()), isolate());
+ int descriptor = transition->LastAdded();
+
+ DescriptorArray* target_descriptors = transition->instance_descriptors();
+ PropertyDetails details = target_descriptors->GetDetails(descriptor);
+
+ if (details.type() == CALLBACKS || details.attributes() != NONE) break;
+
+ return compiler.CompileStoreTransition(
+ receiver, lookup, transition, name);
+ }
case NORMAL:
+ if (kind() == Code::KEYED_STORE_IC) break;
if (receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
@@ -1806,12 +1634,16 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
Handle<PropertyCell> cell(
global->GetPropertyCell(lookup), isolate());
+ // TODO(verwaest): Turn into a handler.
return isolate()->stub_cache()->ComputeStoreGlobal(
- name, global, cell, value, strict_mode);
+ name, global, cell, value, strict_mode());
}
ASSERT(holder.is_identical_to(receiver));
- return isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
+ return strict_mode() == kStrictMode
+ ? isolate()->builtins()->StoreIC_Normal_Strict()
+ : isolate()->builtins()->StoreIC_Normal();
case CALLBACKS: {
+ if (kind() == Code::KEYED_STORE_IC) break;
Handle<Object> callback(lookup->GetCallbackObject(), isolate());
if (callback->IsExecutableAccessorInfo()) {
Handle<ExecutableAccessorInfo> info =
@@ -1819,8 +1651,7 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
if (v8::ToCData<Address>(info->setter()) == 0) break;
if (!holder->HasFastProperties()) break;
if (!info->IsCompatibleReceiver(*receiver)) break;
- return isolate()->stub_cache()->ComputeStoreCallback(
- name, receiver, holder, info, strict_mode);
+ return compiler.CompileStoreCallback(receiver, holder, name, info);
} else if (callback->IsAccessorPair()) {
Handle<Object> setter(
Handle<AccessorPair>::cast(callback)->setter(), isolate());
@@ -1830,14 +1661,12 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
CallOptimization call_optimization(function);
if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver) &&
- FLAG_js_accessor_ics) {
- return isolate()->stub_cache()->ComputeStoreCallback(
- name, receiver, holder, call_optimization, strict_mode);
+ call_optimization.IsCompatibleReceiver(*receiver)) {
+ return compiler.CompileStoreCallback(
+ receiver, holder, name, call_optimization);
}
- return isolate()->stub_cache()->ComputeStoreViaSetter(
- name, receiver, holder, Handle<JSFunction>::cast(setter),
- strict_mode);
+ return compiler.CompileStoreViaSetter(
+ receiver, holder, name, Handle<JSFunction>::cast(setter));
}
// TODO(dcarney): Handle correctly.
if (callback->IsDeclaredAccessorInfo()) break;
@@ -1846,55 +1675,38 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
break;
}
case INTERCEPTOR:
- ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
- return isolate()->stub_cache()->ComputeStoreInterceptor(
- name, receiver, strict_mode);
+ if (kind() == Code::KEYED_STORE_IC) break;
+ ASSERT(HasInterceptorSetter(*receiver));
+ return compiler.CompileStoreInterceptor(receiver, name);
case CONSTANT:
break;
- case TRANSITION: {
- // Explicitly pass in the receiver map since LookupForWrite may have
- // stored something else than the receiver in the holder.
- Handle<Map> transition(
- lookup->GetTransitionTarget(receiver->map()), isolate());
- int descriptor = transition->LastAdded();
-
- DescriptorArray* target_descriptors = transition->instance_descriptors();
- PropertyDetails details = target_descriptors->GetDetails(descriptor);
-
- if (details.type() == CALLBACKS || details.attributes() != NONE) break;
-
- return isolate()->stub_cache()->ComputeStoreTransition(
- name, receiver, lookup, transition, strict_mode);
- }
case NONEXISTENT:
case HANDLER:
UNREACHABLE();
break;
}
- return Handle<Code>::null();
+ return slow_stub();
}
Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
- KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode) {
+ KeyedAccessStoreMode store_mode) {
// Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
if (target()->type() != Code::NORMAL) {
TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
- return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
+ return generic_stub();
}
- State ic_state = target()->ic_state();
Handle<Map> receiver_map(receiver->map(), isolate());
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) {
// Optimistically assume that ICs that haven't reached the MONOMORPHIC state
// yet will do so and stay there.
Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
return isolate()->stub_cache()->ComputeKeyedStoreElement(
- monomorphic_map, strict_mode, store_mode);
+ monomorphic_map, strict_mode(), store_mode);
}
MapHandleList target_receiver_maps;
@@ -1903,9 +1715,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// If a non-map-specific IC is installed (e.g. keyed stores into
// properties in dictionary mode), then there will be no receiver maps
// in the target.
- return strict_mode == kStrictMode
- ? generic_stub_strict()
- : generic_stub();
+ return generic_stub();
}
// There are several special cases where an IC that is MONOMORPHIC can still
@@ -1915,7 +1725,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
KeyedAccessStoreMode old_store_mode =
Code::GetKeyedAccessStoreMode(target()->extra_ic_state());
Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
- if (ic_state == MONOMORPHIC) {
+ if (state() == MONOMORPHIC) {
// If the "old" and "new" maps are in the same elements map family, stay
// MONOMORPHIC and use the map for the most generic ElementsKind.
Handle<Map> transitioned_receiver_map = receiver_map;
@@ -1927,7 +1737,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// Element family is the same, use the "worst" case map.
store_mode = GetNonTransitioningStoreMode(store_mode);
return isolate()->stub_cache()->ComputeKeyedStoreElement(
- transitioned_receiver_map, strict_mode, store_mode);
+ transitioned_receiver_map, strict_mode(), store_mode);
} else if (*previous_receiver_map == receiver->map() &&
old_store_mode == STANDARD_STORE &&
(IsGrowStoreMode(store_mode) ||
@@ -1937,11 +1747,11 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
return isolate()->stub_cache()->ComputeKeyedStoreElement(
- receiver_map, strict_mode, store_mode);
+ receiver_map, strict_mode(), store_mode);
}
}
- ASSERT(ic_state != GENERIC);
+ ASSERT(state() != GENERIC);
bool map_added =
AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
@@ -1957,14 +1767,14 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the generic stub.
TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
- return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
+ return generic_stub();
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
- return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
+ return generic_stub();
}
// Make sure all polymorphic handlers have the same store mode, otherwise the
@@ -1975,9 +1785,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
store_mode = old_store_mode;
} else if (store_mode != old_store_mode) {
TRACE_GENERIC_IC(isolate(), "KeyedIC", "store mode mismatch");
- return strict_mode == kStrictMode
- ? generic_stub_strict()
- : generic_stub();
+ return generic_stub();
}
}
@@ -1995,14 +1803,12 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
external_arrays != target_receiver_maps.length()) {
TRACE_GENERIC_IC(isolate(), "KeyedIC",
"unsupported combination of external and normal arrays");
- return strict_mode == kStrictMode
- ? generic_stub_strict()
- : generic_stub();
+ return generic_stub();
}
}
return isolate()->stub_cache()->ComputeStoreElementPolymorphic(
- &target_receiver_maps, store_mode, strict_mode);
+ &target_receiver_maps, store_mode, strict_mode());
}
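// A rough sketch of the lattice StoreElementStub walks (inferred from the
// branches above; the elided context may add cases):
//
//   UNINITIALIZED/PREMONOMORPHIC -> MONOMORPHIC   first map seen
//   MONOMORPHIC -> MONOMORPHIC                    same ElementsKind family,
//                                                 or a store-mode upgrade
//   MONOMORPHIC -> POLYMORPHIC                    new map, within limits
//   any state -> generic stub                     non-NORMAL target, repeated
//                                                 map, too many maps,
//                                                 store-mode mismatch, or
//                                                 mixed external and normal
//                                                 arrays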
@@ -2125,117 +1931,73 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
}
-MaybeObject* KeyedStoreIC::Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
+MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
ICMissMode miss_mode) {
+ if (MigrateDeprecated(object)) {
+ return Runtime::SetObjectPropertyOrFail(
+ isolate(), object, key, value, NONE, strict_mode());
+ }
+
// Check for values that can be converted into an internalized string directly
// or are representable as a smi.
key = TryConvertKey(key, isolate());
+ MaybeObject* maybe_object = NULL;
+ Handle<Code> stub = generic_stub();
+
if (key->IsInternalizedString()) {
- return StoreIC::Store(state,
- strict_mode,
- object,
- Handle<String>::cast(key),
- value,
- JSReceiver::MAY_BE_STORE_FROM_KEYED);
- }
-
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
- !(FLAG_harmony_observation && object->IsJSObject() &&
- JSObject::cast(*object)->map()->is_observed());
- if (use_ic && !object->IsSmi()) {
- // Don't use ICs for maps of the objects in Array's prototype chain. We
- // expect to be able to trap element sets to objects with those maps in the
- // runtime to enable optimization of element hole access.
- Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
- if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
- }
- ASSERT(!(use_ic && object->IsJSGlobalProxy()));
-
- if (use_ic) {
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? generic_stub_strict()
- : generic_stub();
- if (miss_mode != MISS_FORCE_GENERIC) {
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
- bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
- } else if (key_is_smi_like &&
- (target() != *non_strict_arguments_stub())) {
- KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value);
- stub = StoreElementStub(receiver, store_mode, strict_mode);
- } else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "key not a number");
+ maybe_object = StoreIC::Store(object,
+ Handle<String>::cast(key),
+ value,
+ JSReceiver::MAY_BE_STORE_FROM_KEYED);
+ if (maybe_object->IsFailure()) return maybe_object;
+ } else {
+ bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
+ !(FLAG_harmony_observation && object->IsJSObject() &&
+ JSObject::cast(*object)->map()->is_observed());
+ if (use_ic && !object->IsSmi()) {
+ // Don't use ICs for maps of the objects in Array's prototype chain. We
+ // expect to be able to trap element sets to objects with those maps in
+ // the runtime to enable optimization of element hole access.
+ Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+ if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
+ }
+
+ if (use_ic) {
+ ASSERT(!object->IsJSGlobalProxy());
+
+ if (miss_mode != MISS_FORCE_GENERIC) {
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
+ if (receiver->elements()->map() ==
+ isolate()->heap()->non_strict_arguments_elements_map()) {
+ stub = non_strict_arguments_stub();
+ } else if (key_is_smi_like &&
+ (!target().is_identical_to(non_strict_arguments_stub()))) {
+ KeyedAccessStoreMode store_mode =
+ GetStoreMode(receiver, key, value);
+ stub = StoreElementStub(receiver, store_mode);
+ }
}
- } else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "not an object");
}
- } else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "force generic");
+ }
+ }
+
+ if (!is_target_set()) {
+ if (*stub == *generic_stub()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
}
ASSERT(!stub.is_null());
set_target(*stub);
- TRACE_IC("KeyedStoreIC", key, state, target());
+ TRACE_IC("StoreIC", key);
}
+ if (maybe_object) return maybe_object;
return Runtime::SetObjectPropertyOrFail(
- isolate(), object , key, value, NONE, strict_mode);
-}
-
-
-Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value) {
- // If the property has a non-field type allowing map transitions
- // where there is extra room in the object, we leave the IC in its
- // current state.
- switch (lookup->type()) {
- case FIELD:
- return isolate()->stub_cache()->ComputeKeyedStoreField(
- name, receiver, lookup, strict_mode);
- case TRANSITION: {
- // Explicitly pass in the receiver map since LookupForWrite may have
- // stored something else than the receiver in the holder.
- Handle<Map> transition(
- lookup->GetTransitionTarget(receiver->map()), isolate());
- int descriptor = transition->LastAdded();
-
- DescriptorArray* target_descriptors = transition->instance_descriptors();
- PropertyDetails details = target_descriptors->GetDetails(descriptor);
-
- if (details.type() != CALLBACKS && details.attributes() == NONE) {
- return isolate()->stub_cache()->ComputeKeyedStoreTransition(
- name, receiver, lookup, transition, strict_mode);
- }
- // fall through.
- }
- case NORMAL:
- case CONSTANT:
- case CALLBACKS:
- case INTERCEPTOR:
- // Always rewrite to the generic case so that we do not
- // repeatedly try to rewrite.
- return (strict_mode == kStrictMode)
- ? generic_stub_strict()
- : generic_stub();
- case HANDLER:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- return Handle<Code>::null();
+ isolate(), object, key, value, NONE, strict_mode());
}
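// In outline, the rewritten Store: (1) migrates deprecated maps and goes
// straight to the runtime; (2) routes internalized-string keys through
// StoreIC::Store; (3) for smi-like keys on JSObjects, picks a specialized
// stub via StoreElementStub; (4) otherwise keeps the generic stub; and
// finally performs the store through Runtime::SetObjectPropertyOrFail
// unless StoreIC::Store already produced the result.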
@@ -2251,12 +2013,10 @@ RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CallIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- MaybeObject* maybe_result = ic.LoadFunction(state,
- extra_ic_state,
- args.at<Object>(0),
- args.at<String>(1));
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
JSFunction* raw_function;
if (!maybe_result->To(&raw_function)) return maybe_result;
@@ -2278,9 +2038,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedCallIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- MaybeObject* maybe_result =
- ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
// Result could be a function or a failure.
JSFunction* raw_function = NULL;
if (!maybe_result->To(&raw_function)) return maybe_result;
@@ -2298,8 +2059,10 @@ RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<String>(1));
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Load(receiver, key);
}
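// The *_Miss runtime entries in this file now share one shape: reconstruct
// the IC for the current call site, let UpdateState derive the IC state from
// the live receiver and key (replacing the old pattern of threading an
// explicit IC::State and extra state through every call), then retry the
// operation through the IC.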
@@ -2308,8 +2071,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Load(receiver, key, MISS);
}
@@ -2317,8 +2082,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Load(receiver, key, MISS);
}
@@ -2326,11 +2093,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state,
- args.at<Object>(0),
- args.at<Object>(1),
- MISS_FORCE_GENERIC);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Load(receiver, key, MISS_FORCE_GENERIC);
}
@@ -2339,13 +2105,10 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<String>(1),
- args.at<Object>(2));
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2));
}
@@ -2353,13 +2116,10 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<String>(1),
- args.at<Object>(2));
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2));
}
@@ -2442,14 +2202,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<Object>(1),
- args.at<Object>(2),
- MISS);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2), MISS);
}
@@ -2457,26 +2213,21 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<Object>(1),
- args.at<Object>(2),
- MISS);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2), MISS);
}
RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
+ StrictModeFlag strict_mode = ic.strict_mode();
return Runtime::SetObjectProperty(isolate,
object,
key,
@@ -2487,14 +2238,13 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
+ StrictModeFlag strict_mode = ic.strict_mode();
return Runtime::SetObjectProperty(isolate,
object,
key,
@@ -2508,26 +2258,21 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<Object>(1),
- args.at<Object>(2),
- MISS_FORCE_GENERIC);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2), MISS_FORCE_GENERIC);
}
RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
- SealHandleScope scope(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
Handle<Object> value = args.at<Object>(0);
Handle<Object> key = args.at<Object>(2);
Handle<Object> object = args.at<Object>(3);
- StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
+ StrictModeFlag strict_mode = ic.strict_mode();
return Runtime::SetObjectProperty(isolate,
object,
key,
@@ -2537,11 +2282,6 @@ RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
}
-void BinaryOpIC::patch(Code* code) {
- set_target(code);
-}
-
-
const char* BinaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED: return "Uninitialized";
@@ -2556,256 +2296,68 @@ const char* BinaryOpIC::GetName(TypeInfo type_info) {
}
-BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED:
- return ::v8::internal::UNINITIALIZED;
- case SMI:
- case INT32:
- case NUMBER:
- case ODDBALL:
- case STRING:
- return MONOMORPHIC;
- case GENERIC:
- return ::v8::internal::GENERIC;
- }
- UNREACHABLE();
- return ::v8::internal::UNINITIALIZED;
-}
+MaybeObject* BinaryOpIC::Transition(Handle<Object> left, Handle<Object> right) {
+ Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
+ BinaryOpStub stub(extra_ic_state);
+ Handle<Type> left_type = stub.GetLeftType(isolate());
+ Handle<Type> right_type = stub.GetRightType(isolate());
+ bool smi_was_enabled = left_type->Maybe(Type::Smi()) &&
+ right_type->Maybe(Type::Smi());
-Handle<Type> BinaryOpIC::TypeInfoToType(BinaryOpIC::TypeInfo binary_type,
- Isolate* isolate) {
- switch (binary_type) {
- case UNINITIALIZED:
- return handle(Type::None(), isolate);
- case SMI:
- return handle(Type::Smi(), isolate);
- case INT32:
- return handle(Type::Signed32(), isolate);
- case NUMBER:
- return handle(Type::Number(), isolate);
- case ODDBALL:
- return handle(Type::Optional(
- handle(Type::Union(
- handle(Type::Number(), isolate),
- handle(Type::String(), isolate)), isolate)), isolate);
- case STRING:
- return handle(Type::String(), isolate);
- case GENERIC:
- return handle(Type::Any(), isolate);
- }
- UNREACHABLE();
- return handle(Type::Any(), isolate);
-}
-
-
-void BinaryOpIC::StubInfoToType(int minor_key,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* result,
- Isolate* isolate) {
- TypeInfo left_typeinfo, right_typeinfo, result_typeinfo;
- BinaryOpStub::decode_types_from_minor_key(
- minor_key, &left_typeinfo, &right_typeinfo, &result_typeinfo);
- *left = TypeInfoToType(left_typeinfo, isolate);
- *right = TypeInfoToType(right_typeinfo, isolate);
- *result = TypeInfoToType(result_typeinfo, isolate);
-}
-
-
-static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
- Token::Value op) {
- v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(value);
- if (type.IsSmi()) return BinaryOpIC::SMI;
- if (type.IsInteger32()) {
- if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
- return BinaryOpIC::INT32;
- }
- if (type.IsNumber()) return BinaryOpIC::NUMBER;
- if (type.IsString()) return BinaryOpIC::STRING;
- if (value->IsUndefined()) {
- if (op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR ||
- op == Token::SAR ||
- op == Token::SHL ||
- op == Token::SHR) {
- if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
- return BinaryOpIC::INT32;
- }
- return BinaryOpIC::ODDBALL;
+ Maybe<Handle<Object> > result = stub.Result(left, right, isolate());
+ if (!result.has_value) return Failure::Exception();
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ char buffer[100];
+ NoAllocationStringAllocator allocator(buffer,
+ static_cast<unsigned>(sizeof(buffer)));
+ StringStream stream(&allocator);
+ stream.Add("[");
+ stub.PrintName(&stream);
+
+ stub.UpdateStatus(left, right, result);
+
+ stream.Add(" => ");
+ stub.PrintState(&stream);
+ stream.Add(" ");
+ stream.OutputToStdOut();
+ PrintF(" @ %p <- ", static_cast<void*>(*stub.GetCode(isolate())));
+ JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ PrintF("]\n");
+ } else {
+ stub.UpdateStatus(left, right, result);
}
- return BinaryOpIC::GENERIC;
-}
+#else
+ stub.UpdateStatus(left, right, result);
+#endif
+ Handle<Code> code = stub.GetCode(isolate());
+ set_target(*code);
-static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
- Handle<Object> value,
- Token::Value op) {
- BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op);
- if (old_type == BinaryOpIC::STRING) {
- if (new_type == BinaryOpIC::STRING) return new_type;
- return BinaryOpIC::GENERIC;
- }
- return Max(old_type, new_type);
-}
+ left_type = stub.GetLeftType(isolate());
+ right_type = stub.GetRightType(isolate());
+ bool enable_smi = left_type->Maybe(Type::Smi()) &&
+ right_type->Maybe(Type::Smi());
+ if (!smi_was_enabled && enable_smi) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ } else if (smi_was_enabled && !enable_smi) {
+ PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
+ }
-#ifdef DEBUG
-static void TraceBinaryOp(BinaryOpIC::TypeInfo left,
- BinaryOpIC::TypeInfo right,
- Maybe<int32_t> fixed_right_arg,
- BinaryOpIC::TypeInfo result) {
- PrintF("%s*%s", BinaryOpIC::GetName(left), BinaryOpIC::GetName(right));
- if (fixed_right_arg.has_value) PrintF("{%d}", fixed_right_arg.value);
- PrintF("->%s", BinaryOpIC::GetName(result));
+ ASSERT(result.has_value);
+ return static_cast<MaybeObject*>(*result.value);
}
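// Sketch of the transition protocol above: the BinaryOpStub reconstructed
// from the old target's extra IC state computes the result for the observed
// operands, UpdateStatus widens the stub's left/right types to cover them,
// and the regenerated stub is installed as the new target. Comparing "may
// both sides be Smi?" before and after the update decides whether the
// inlined smi check at the call site is enabled or disabled.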
-#endif
-RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
- ASSERT(args.length() == 3);
-
+RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss) {
HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
- int key = args.smi_at(2);
- Token::Value op = BinaryOpStub::decode_op_from_minor_key(key);
-
- BinaryOpIC::TypeInfo previous_left, previous_right, previous_result;
- BinaryOpStub::decode_types_from_minor_key(
- key, &previous_left, &previous_right, &previous_result);
-
- BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op);
- BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op);
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
-
- // STRING is only used for ADD operations.
- if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) &&
- op != Token::ADD) {
- new_left = new_right = BinaryOpIC::GENERIC;
- }
-
- BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
- BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
-
- Maybe<int> previous_fixed_right_arg =
- BinaryOpStub::decode_fixed_right_arg_from_minor_key(key);
-
- int32_t value;
- bool new_has_fixed_right_arg =
- op == Token::MOD &&
- right->ToInt32(&value) &&
- BinaryOpStub::can_encode_arg_value(value) &&
- (previous_overall == BinaryOpIC::UNINITIALIZED ||
- (previous_fixed_right_arg.has_value &&
- previous_fixed_right_arg.value == value));
- Maybe<int32_t> new_fixed_right_arg(
- new_has_fixed_right_arg, new_has_fixed_right_arg ? value : 1);
-
- if (previous_fixed_right_arg.has_value == new_fixed_right_arg.has_value) {
- if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
- if (op == Token::DIV ||
- op == Token::MUL ||
- op == Token::SHR ||
- SmiValuesAre32Bits()) {
- // Arithmetic on two Smi inputs has yielded a heap number.
- // That is the only way to get here from the Smi stub.
- // With 32-bit Smis, all overflows give heap numbers, but with
- // 31-bit Smis, most operations overflow to int32 results.
- result_type = BinaryOpIC::NUMBER;
- } else {
- // Other operations on SMIs that overflow yield int32s.
- result_type = BinaryOpIC::INT32;
- }
- }
- if (new_overall == BinaryOpIC::INT32 &&
- previous_overall == BinaryOpIC::INT32) {
- if (new_left == previous_left && new_right == previous_right) {
- result_type = BinaryOpIC::NUMBER;
- }
- }
- }
-
- BinaryOpStub stub(key, new_left, new_right, result_type, new_fixed_right_arg);
- Handle<Code> code = stub.GetCode(isolate);
- if (!code.is_null()) {
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[BinaryOpIC in ");
- JavaScriptFrame::PrintTop(isolate, stdout, false, true);
- PrintF(" ");
- TraceBinaryOp(previous_left, previous_right, previous_fixed_right_arg,
- previous_result);
- PrintF(" => ");
- TraceBinaryOp(new_left, new_right, new_fixed_right_arg, result_type);
- PrintF(" #%s @ %p]\n", Token::Name(op), static_cast<void*>(*code));
- }
-#endif
- BinaryOpIC ic(isolate);
- ic.patch(*code);
-
- // Activate inlined smi code.
- if (previous_overall == BinaryOpIC::UNINITIALIZED) {
- PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
- }
- }
-
- Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
- Object* builtin = NULL; // Initialization calms down the compiler.
- switch (op) {
- case Token::ADD:
- builtin = builtins->javascript_builtin(Builtins::ADD);
- break;
- case Token::SUB:
- builtin = builtins->javascript_builtin(Builtins::SUB);
- break;
- case Token::MUL:
- builtin = builtins->javascript_builtin(Builtins::MUL);
- break;
- case Token::DIV:
- builtin = builtins->javascript_builtin(Builtins::DIV);
- break;
- case Token::MOD:
- builtin = builtins->javascript_builtin(Builtins::MOD);
- break;
- case Token::BIT_AND:
- builtin = builtins->javascript_builtin(Builtins::BIT_AND);
- break;
- case Token::BIT_OR:
- builtin = builtins->javascript_builtin(Builtins::BIT_OR);
- break;
- case Token::BIT_XOR:
- builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
- break;
- case Token::SHR:
- builtin = builtins->javascript_builtin(Builtins::SHR);
- break;
- case Token::SAR:
- builtin = builtins->javascript_builtin(Builtins::SAR);
- break;
- case Token::SHL:
- builtin = builtins->javascript_builtin(Builtins::SHL);
- break;
- default:
- UNREACHABLE();
- }
-
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
-
- bool caught_exception;
- Handle<Object> builtin_args[] = { right };
- Handle<Object> result = Execution::Call(isolate,
- builtin_function,
- left,
- ARRAY_SIZE(builtin_args),
- builtin_args,
- &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
- }
- return *result;
+ BinaryOpIC ic(isolate);
+ return ic.Transition(left, right);
}
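// BinaryOpIC_Miss is now the single runtime entry for binary-op misses
// (replacing BinaryOp_Patch): the stub's miss path lands here, Transition()
// repatches the IC, and the result computed during the transition is
// returned directly instead of re-running the operation through a
// JavaScript builtin.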
@@ -3032,16 +2584,16 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
- return ic.target();
+ return ic.raw_target();
}
void CompareNilIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+ if (IsCleared(target)) return;
Code::ExtraICState state = target->extended_extra_ic_state();
CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED);
@@ -3106,6 +2658,47 @@ RUNTIME_FUNCTION(MaybeObject*, Unreachable) {
}
+Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
+ switch (op) {
+ default:
+ UNREACHABLE();
+ case Token::ADD:
+ return Builtins::ADD;
+ case Token::SUB:
+ return Builtins::SUB;
+ case Token::MUL:
+ return Builtins::MUL;
+ case Token::DIV:
+ return Builtins::DIV;
+ case Token::MOD:
+ return Builtins::MOD;
+ case Token::BIT_OR:
+ return Builtins::BIT_OR;
+ case Token::BIT_AND:
+ return Builtins::BIT_AND;
+ case Token::BIT_XOR:
+ return Builtins::BIT_XOR;
+ case Token::SAR:
+ return Builtins::SAR;
+ case Token::SHR:
+ return Builtins::SHR;
+ case Token::SHL:
+ return Builtins::SHL;
+ }
+}
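// Hypothetical usage sketch (not part of this patch): a slow path without a
// stub-computed result could map the token to a builtin and apply it, much
// as the removed BinaryOp_Patch did inline:
//
//   Handle<JSFunction> fun(JSFunction::cast(
//       isolate->js_builtins_object()->javascript_builtin(
//           BinaryOpIC::TokenToJSBuiltin(op))), isolate);
//   bool caught_exception;
//   Handle<Object> call_args[] = { right };
//   Handle<Object> result = Execution::Call(
//       isolate, fun, left, ARRAY_SIZE(call_args), call_args,
//       &caught_exception);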
+
+
MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object,
Code::ExtraICState extra_ic_state) {
ToBooleanStub stub(extra_ic_state);
@@ -3121,8 +2714,8 @@ RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss) {
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
ToBooleanIC ic(isolate);
- Code::ExtraICState ic_state = ic.target()->extended_extra_ic_state();
- return ic.ToBoolean(object, ic_state);
+ Code::ExtraICState extra_ic_state = ic.target()->extended_extra_ic_state();
+ return ic.ToBoolean(object, extra_ic_state);
}
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 8f09e1d0a2..fde4bc77a5 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -57,8 +57,8 @@ namespace internal {
ICU(LoadPropertyWithInterceptorForCall) \
ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty) \
- ICU(BinaryOp_Patch) \
ICU(CompareIC_Miss) \
+ ICU(BinaryOpIC_Miss) \
ICU(CompareNilIC_Miss) \
ICU(Unreachable) \
ICU(ToBooleanIC_Miss)
@@ -95,11 +95,17 @@ class IC {
virtual ~IC() {}
// Get the call-site target; used for determining the state.
- Code* target() const { return GetTargetAtAddress(address()); }
+ Handle<Code> target() const { return target_; }
+ Code* raw_target() const { return GetTargetAtAddress(address()); }
+
+ State state() const { return state_; }
inline Address address() const;
// Compute the current IC state based on the target stub, receiver and name.
- static State StateFrom(Code* target, Object* receiver, Object* name);
+ void UpdateState(Handle<Object> receiver, Handle<Object> name);
+ void MarkMonomorphicPrototypeFailure() {
+ state_ = MONOMORPHIC_PROTOTYPE_FAILURE;
+ }
// Clear the inline cache to initial state.
static void Clear(Isolate* isolate, Address address);
@@ -128,12 +134,15 @@ class IC {
// These methods should not be called with undefined or null.
static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object,
JSObject* holder);
- static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
- JSObject* holder);
static inline JSObject* GetCodeCacheHolder(Isolate* isolate,
Object* object,
InlineCacheHolderFlag holder);
+ static bool IsCleared(Code* code) {
+ InlineCacheState state = code->ic_state();
+ return state == UNINITIALIZED || state == PREMONOMORPHIC;
+ }
+
protected:
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
@@ -146,15 +155,17 @@ class IC {
#endif
// Set the call-site target.
- void set_target(Code* code) { SetTargetAtAddress(address(), code); }
+ void set_target(Code* code) {
+ SetTargetAtAddress(address(), code);
+ target_set_ = true;
+ }
+
+ bool is_target_set() { return target_set_; }
#ifdef DEBUG
char TransitionMarkFromState(IC::State state);
- void TraceIC(const char* type,
- Handle<Object> name,
- State old_state,
- Code* new_target);
+ void TraceIC(const char* type, Handle<Object> name);
#endif
Failure* TypeError(const char* type,
@@ -167,51 +178,52 @@ class IC {
static inline void SetTargetAtAddress(Address address, Code* target);
static void PostPatching(Address address, Code* target, Code* old_target);
- virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode) {
- set_target(*handler);
- }
- bool UpdatePolymorphicIC(State state,
- Handle<HeapObject> receiver,
- Handle<String> name,
- Handle<Code> code,
- StrictModeFlag strict_mode);
-
- virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
+ // Compute the handler either by compiling or by retrieving a cached version.
+ Handle<Code> ComputeHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value = Handle<Code>::null());
+ virtual Handle<Code> CompileHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value) {
UNREACHABLE();
return Handle<Code>::null();
- };
+ }
+ void UpdateMonomorphicIC(Handle<HeapObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name);
+
+ bool UpdatePolymorphicIC(Handle<HeapObject> receiver,
+ Handle<String> name,
+ Handle<Code> code);
void CopyICToMegamorphicCache(Handle<String> name);
bool IsTransitionedMapOfMonomorphicTarget(Map* receiver_map);
- void PatchCache(State state,
- StrictModeFlag strict_mode,
- Handle<HeapObject> receiver,
+ void PatchCache(Handle<HeapObject> receiver,
Handle<String> name,
Handle<Code> code);
virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
- virtual Handle<Code> megamorphic_stub() {
+ virtual Code::Kind kind() const {
UNREACHABLE();
- return Handle<Code>::null();
+ return Code::STUB;
}
- virtual Handle<Code> megamorphic_stub_strict() {
+ virtual Handle<Code> slow_stub() const {
UNREACHABLE();
return Handle<Code>::null();
}
- virtual Handle<Code> generic_stub() const {
+ virtual Handle<Code> megamorphic_stub() {
UNREACHABLE();
return Handle<Code>::null();
}
- virtual Handle<Code> generic_stub_strict() const {
+ virtual Handle<Code> generic_stub() const {
UNREACHABLE();
return Handle<Code>::null();
}
+ virtual StrictModeFlag strict_mode() const { return kNonStrictMode; }
+ bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
+ Handle<String> name);
+ void TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name);
private:
// Frame pointer for the frame that uses (calls) the IC.
@@ -225,6 +237,11 @@ class IC {
Isolate* isolate_;
+ // The original code target that missed.
+ Handle<Code> target_;
+ State state_;
+ bool target_set_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
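// The new target_/state_/target_set_ members cache the call-site target and
// the state derived from it at IC construction time, so handlers can consult
// state() repeatedly without re-reading the patched code object, and
// is_target_set() lets Load/Store detect that a handler already installed a
// new target.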
@@ -251,31 +268,23 @@ class CallICBase: public IC {
class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
// Returns a JSFunction or a Failure.
- MUST_USE_RESULT MaybeObject* LoadFunction(State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object,
Handle<String> name);
protected:
CallICBase(Code::Kind kind, Isolate* isolate)
: IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
- bool TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object,
- Code::ExtraICState* extra_ic_state);
+ virtual Code::ExtraICState extra_ic_state() { return Code::kNoExtraICState; }
// Compute a monomorphic stub if possible, otherwise return a null handle.
Handle<Code> ComputeMonomorphicStub(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_state,
Handle<Object> object,
Handle<String> name);
// Update the inline cache and the global stub cache based on the lookup
// result.
void UpdateCaches(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
@@ -302,6 +311,9 @@ class CallICBase: public IC {
Code::Kind kind,
Code::ExtraICState extra_state);
+ virtual Handle<Code> megamorphic_stub();
+ virtual Handle<Code> pre_monomorphic_stub();
+
Code::Kind kind_;
friend class IC;
@@ -310,7 +322,9 @@ class CallICBase: public IC {
class CallIC: public CallICBase {
public:
- explicit CallIC(Isolate* isolate) : CallICBase(Code::CALL_IC, isolate) {
+ explicit CallIC(Isolate* isolate)
+ : CallICBase(Code::CALL_IC, isolate),
+ extra_ic_state_(target()->extra_ic_state()) {
ASSERT(target()->is_call_stub());
}
@@ -335,6 +349,13 @@ class CallIC: public CallICBase {
CallICBase::GenerateNormal(masm, argc);
GenerateMiss(masm, argc, Code::kNoExtraICState);
}
+ bool TryUpdateExtraICState(LookupResult* lookup, Handle<Object> object);
+
+ protected:
+ virtual Code::ExtraICState extra_ic_state() { return extra_ic_state_; }
+
+ private:
+ Code::ExtraICState extra_ic_state_;
};
@@ -345,8 +366,7 @@ class KeyedCallIC: public CallICBase {
ASSERT(target()->is_keyed_call_stub());
}
- MUST_USE_RESULT MaybeObject* LoadFunction(State state,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object,
Handle<Object> key);
// Code generator routines.
@@ -381,8 +401,7 @@ class LoadIC: public IC {
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- MUST_USE_RESULT MaybeObject* Load(State state,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* Load(Handle<Object> object,
Handle<String> name);
protected:
@@ -399,34 +418,33 @@ class LoadIC: public IC {
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
- State state,
Handle<Object> object,
Handle<String> name);
- virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode);
-
- virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode);
-
- virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name);
+ virtual Handle<Code> CompileHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> unused);
private:
// Stub accessors.
static Handle<Code> initialize_stub(Isolate* isolate) {
return isolate->builtins()->LoadIC_Initialize();
}
+
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
+ return isolate->builtins()->LoadIC_PreMonomorphic();
+ }
+
virtual Handle<Code> pre_monomorphic_stub() {
- return isolate()->builtins()->LoadIC_PreMonomorphic();
+ return pre_monomorphic_stub(isolate());
}
+ Handle<Code> SimpleFieldLoad(int offset,
+ bool inobject = true,
+ Representation representation =
+ Representation::Tagged());
+
static void Clear(Isolate* isolate, Address address, Code* target);
friend class IC;
@@ -446,8 +464,7 @@ class KeyedLoadIC: public LoadIC {
ASSERT(target()->is_keyed_load_stub());
}
- MUST_USE_RESULT MaybeObject* Load(State state,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* Load(Handle<Object> object,
Handle<Object> key,
ICMissMode force_generic);
@@ -487,14 +504,6 @@ class KeyedLoadIC: public LoadIC {
return isolate()->builtins()->KeyedLoadIC_Slow();
}
- // Update the inline cache.
- virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode);
- virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name);
virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
private:
@@ -502,8 +511,11 @@ class KeyedLoadIC: public LoadIC {
static Handle<Code> initialize_stub(Isolate* isolate) {
return isolate->builtins()->KeyedLoadIC_Initialize();
}
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
+ return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
+ }
virtual Handle<Code> pre_monomorphic_stub() {
- return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
+ return pre_monomorphic_stub(isolate());
}
Handle<Code> indexed_interceptor_stub() {
return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
@@ -523,10 +535,14 @@ class KeyedLoadIC: public LoadIC {
class StoreIC: public IC {
public:
- StoreIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
+ StoreIC(FrameDepth depth, Isolate* isolate)
+ : IC(depth, isolate),
+ strict_mode_(Code::GetStrictMode(target()->extra_ic_state())) {
ASSERT(target()->is_store_stub() || target()->is_keyed_store_stub());
}
+ virtual StrictModeFlag strict_mode() const { return strict_mode_; }
+
// Code generators for stub routines. Only called once at startup.
static void GenerateSlow(MacroAssembler* masm);
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
@@ -541,8 +557,6 @@ class StoreIC: public IC {
StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* Store(
- State state,
- StrictModeFlag strict_mode,
Handle<Object> object,
Handle<String> name,
Handle<Object> value,
@@ -552,58 +566,60 @@ class StoreIC: public IC {
protected:
virtual Code::Kind kind() const { return Code::STORE_IC; }
virtual Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->StoreIC_Megamorphic();
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->StoreIC_Megamorphic_Strict();
+ } else {
+ return isolate()->builtins()->StoreIC_Megamorphic();
+ }
}
// Stub accessors.
- virtual Handle<Code> megamorphic_stub_strict() {
- return isolate()->builtins()->StoreIC_Megamorphic_Strict();
- }
virtual Handle<Code> generic_stub() const {
- return isolate()->builtins()->StoreIC_Generic();
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->StoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->StoreIC_Generic();
+ }
}
- virtual Handle<Code> generic_stub_strict() const {
- return isolate()->builtins()->StoreIC_Generic_Strict();
+
+ virtual Handle<Code> slow_stub() const {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->StoreIC_Slow_Strict();
+ } else {
+ return isolate()->builtins()->StoreIC_Slow();
+ }
}
- virtual Handle<Code> pre_monomorphic_stub() const {
- return isolate()->builtins()->StoreIC_PreMonomorphic();
+
+ virtual Handle<Code> pre_monomorphic_stub() {
+ return pre_monomorphic_stub(isolate(), strict_mode());
}
- virtual Handle<Code> pre_monomorphic_stub_strict() const {
- return isolate()->builtins()->StoreIC_PreMonomorphic_Strict();
+
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->StoreIC_PreMonomorphic_Strict();
+ } else {
+ return isolate->builtins()->StoreIC_PreMonomorphic();
+ }
}
+
virtual Handle<Code> global_proxy_stub() {
- return isolate()->builtins()->StoreIC_GlobalProxy();
- }
- virtual Handle<Code> global_proxy_stub_strict() {
- return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
+ } else {
+ return isolate()->builtins()->StoreIC_GlobalProxy();
+ }
}
- virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode);
-
- virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode);
-
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value);
- // Compute the code stub for this store; used for rewriting to
- // monomorphic state and making sure that the code stub is in the
- // stub cache.
- virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value);
+ virtual Handle<Code> CompileHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Object> value);
private:
void set_target(Code* code) {
@@ -613,14 +629,19 @@ class StoreIC: public IC {
IC::set_target(code);
}
- static Handle<Code> initialize_stub(Isolate* isolate) {
- return isolate->builtins()->StoreIC_Initialize();
- }
- static Handle<Code> initialize_stub_strict(Isolate* isolate) {
- return isolate->builtins()->StoreIC_Initialize_Strict();
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->StoreIC_Initialize_Strict();
+ } else {
+ return isolate->builtins()->StoreIC_Initialize();
+ }
}
+
static void Clear(Isolate* isolate, Address address, Code* target);
+ StrictModeFlag strict_mode_;
+
friend class IC;
};
@@ -644,9 +665,7 @@ class KeyedStoreIC: public StoreIC {
ASSERT(target()->is_keyed_store_stub());
}
- MUST_USE_RESULT MaybeObject* Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* Store(Handle<Object> object,
Handle<Object> name,
Handle<Object> value,
ICMissMode force_generic);
@@ -668,56 +687,62 @@ class KeyedStoreIC: public StoreIC {
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
- virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value);
virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
- virtual Handle<Code> pre_monomorphic_stub() const {
- return isolate()->builtins()->KeyedStoreIC_PreMonomorphic();
+ virtual Handle<Code> pre_monomorphic_stub() {
+ return pre_monomorphic_stub(isolate(), strict_mode());
}
- virtual Handle<Code> pre_monomorphic_stub_strict() const {
- return isolate()->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
+ } else {
+ return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
+ }
}
- virtual Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->KeyedStoreIC_Generic();
+ virtual Handle<Code> slow_stub() const {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->KeyedStoreIC_Slow_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Slow();
+ }
}
- virtual Handle<Code> megamorphic_stub_strict() {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ virtual Handle<Code> megamorphic_stub() {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
+ }
}
Handle<Code> StoreElementStub(Handle<JSObject> receiver,
- KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode);
-
- virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode);
+ KeyedAccessStoreMode store_mode);
private:
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
- ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
- Code::GetStrictMode(target()->extra_ic_state()));
+ ASSERT(Code::GetStrictMode(code->extra_ic_state()) == strict_mode());
IC::set_target(code);
}
// Stub accessors.
- static Handle<Code> initialize_stub(Isolate* isolate) {
- return isolate->builtins()->KeyedStoreIC_Initialize();
- }
- static Handle<Code> initialize_stub_strict(Isolate* isolate) {
- return isolate->builtins()->KeyedStoreIC_Initialize_Strict();
- }
- Handle<Code> generic_stub() const {
- return isolate()->builtins()->KeyedStoreIC_Generic();
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->KeyedStoreIC_Initialize_Strict();
+ } else {
+ return isolate->builtins()->KeyedStoreIC_Initialize();
+ }
}
- Handle<Code> generic_stub_strict() const {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+
+ virtual Handle<Code> generic_stub() const {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
+ }
}
+
Handle<Code> non_strict_arguments_stub() {
return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
}
@@ -748,22 +773,14 @@ class BinaryOpIC: public IC {
GENERIC
};
- static void StubInfoToType(int minor_key,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* result,
- Isolate* isolate);
-
- explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+ explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
- void patch(Code* code);
+ static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
static const char* GetName(TypeInfo type_info);
- static State ToState(TypeInfo type_info);
-
- private:
- static Handle<Type> TypeInfoToType(TypeInfo binary_type, Isolate* isolate);
+ MUST_USE_RESULT MaybeObject* Transition(Handle<Object> left,
+ Handle<Object> right);
};
@@ -870,6 +887,7 @@ DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss);
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index df0f14a74c..4223dde211 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -648,6 +648,8 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+ heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();
+
// Ready to start incremental marking.
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Running\n");
@@ -726,7 +728,7 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
MarkBit mark_bit = Marking::MarkBitFrom(obj);
-#ifdef DEBUG
+#if ENABLE_SLOW_ASSERTS
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 45076f5657..764bcb8bf3 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -48,6 +48,11 @@ SaveContext::SaveContext(Isolate* isolate)
}
+bool Isolate::IsCodePreAgingActive() {
+ return FLAG_optimize_for_size && FLAG_age_code && !IsDebuggerActive();
+}
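// A plausible reading (the consumers of this predicate are outside this
// hunk): with --optimize-for-size and --age-code, and no active debugger,
// freshly compiled code may be installed already "aged" so the code-flushing
// heuristics reclaim it sooner.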
+
+
bool Isolate::IsDebuggerActive() {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!NoBarrier_Load(&debugger_initialized_)) return false;
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 6fa496a902..71cd301581 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -42,7 +42,6 @@
#include "isolate-inl.h"
#include "lithium-allocator.h"
#include "log.h"
-#include "marking-thread.h"
#include "messages.h"
#include "platform.h"
#include "regexp-stack.h"
@@ -121,11 +120,7 @@ void ThreadLocalTop::InitializeInternal() {
void ThreadLocalTop::Initialize() {
InitializeInternal();
#ifdef USE_SIMULATOR
-#if V8_TARGET_ARCH_ARM
simulator_ = Simulator::current(isolate_);
-#elif V8_TARGET_ARCH_MIPS
- simulator_ = Simulator::current(isolate_);
-#endif
#endif
thread_id_ = ThreadId::Current();
}
@@ -147,8 +142,6 @@ int SystemThreadManager::NumberOfParallelSystemThreads(
return number_of_threads;
} else if (type == CONCURRENT_SWEEPING) {
return number_of_threads - 1;
- } else if (type == PARALLEL_MARKING) {
- return number_of_threads;
}
return 1;
}
@@ -345,6 +338,14 @@ Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
#endif // DEBUG
Mutex Isolate::process_wide_mutex_;
+// TODO(dcarney): Remove with default isolate.
+enum DefaultIsolateStatus {
+ kDefaultIsolateUninitialized,
+ kDefaultIsolateInitialized,
+ kDefaultIsolateCrashIfInitialized
+};
+static DefaultIsolateStatus default_isolate_status_
+ = kDefaultIsolateUninitialized;
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
Atomic32 Isolate::isolate_counter_ = 0;
@@ -382,8 +383,16 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
}
+void Isolate::SetCrashIfDefaultIsolateInitialized() {
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ CHECK(default_isolate_status_ != kDefaultIsolateInitialized);
+ default_isolate_status_ = kDefaultIsolateCrashIfInitialized;
+}
+
+
void Isolate::EnsureDefaultIsolate() {
LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ CHECK(default_isolate_status_ != kDefaultIsolateCrashIfInitialized);
if (default_isolate_ == NULL) {
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
@@ -1087,7 +1096,7 @@ Failure* Isolate::StackOverflow() {
Handle<String> key = factory()->stack_overflow_string();
Handle<JSObject> boilerplate =
Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key));
- Handle<JSObject> exception = Copy(boilerplate);
+ Handle<JSObject> exception = JSObject::Copy(boilerplate);
DoThrow(*exception, NULL);
// Get stack trace limit.
@@ -1657,11 +1666,7 @@ char* Isolate::RestoreThread(char* from) {
// This might be just paranoia, but it seems to be needed in case a
// thread_local_top_ is restored on a separate OS thread.
#ifdef USE_SIMULATOR
-#if V8_TARGET_ARCH_ARM
thread_local_top()->simulator_ = Simulator::current(this);
-#elif V8_TARGET_ARCH_MIPS
- thread_local_top()->simulator_ = Simulator::current(this);
-#endif
#endif
ASSERT(context() == NULL || context()->IsContext());
return from + sizeof(ThreadLocalTop);
@@ -1776,7 +1781,6 @@ Isolate::Isolate()
// TODO(bmeurer) Initialized lazily because it depends on flags; can
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
- is_memory_constrained_(false),
has_fatal_error_(false),
use_crankshaft_(true),
initialized_from_snapshot_(false),
@@ -1784,8 +1788,7 @@ Isolate::Isolate()
heap_profiler_(NULL),
function_entry_hook_(NULL),
deferred_handles_head_(NULL),
- optimizing_compiler_thread_(this),
- marking_thread_(NULL),
+ optimizing_compiler_thread_(NULL),
sweeper_thread_(NULL),
stress_deopt_count_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
@@ -1879,7 +1882,10 @@ void Isolate::Deinit() {
debugger()->UnloadDebugger();
#endif
- if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Stop();
+ if (FLAG_concurrent_recompilation) {
+ optimizing_compiler_thread_->Stop();
+ delete optimizing_compiler_thread_;
+ }
if (FLAG_sweeper_threads > 0) {
for (int i = 0; i < FLAG_sweeper_threads; i++) {
@@ -1889,14 +1895,6 @@ void Isolate::Deinit() {
delete[] sweeper_thread_;
}
- if (FLAG_marking_threads > 0) {
- for (int i = 0; i < FLAG_marking_threads; i++) {
- marking_thread_[i]->Stop();
- delete marking_thread_[i];
- }
- delete[] marking_thread_;
- }
-
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
if (FLAG_print_deopt_stress) {
@@ -1911,7 +1909,7 @@ void Isolate::Deinit() {
deoptimizer_data_ = NULL;
if (FLAG_preemption) {
v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
- v8::Locker::StopPreemption();
+ v8::Locker::StopPreemption(reinterpret_cast<v8::Isolate*>(this));
}
builtins_.TearDown();
bootstrapper_->TearDown();
@@ -2219,6 +2217,11 @@ bool Isolate::Init(Deserializer* des) {
deoptimizer_data_ = new DeoptimizerData(memory_allocator_);
+ if (FLAG_concurrent_recompilation) {
+ optimizing_compiler_thread_ = new OptimizingCompilerThread(this);
+ optimizing_compiler_thread_->Start();
+ }
+
const bool create_heap_objects = (des == NULL);
if (create_heap_objects && !heap_.CreateHeapObjects()) {
V8::FatalProcessOutOfMemory("heap object creation");
@@ -2248,7 +2251,7 @@ bool Isolate::Init(Deserializer* des) {
if (FLAG_preemption) {
v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
- v8::Locker::StartPreemption(100);
+ v8::Locker::StartPreemption(reinterpret_cast<v8::Isolate*>(this), 100);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -2318,21 +2321,13 @@ bool Isolate::Init(Deserializer* des) {
DONT_TRACK_ALLOCATION_SITE, 0);
stub.InitializeInterfaceDescriptor(
this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
+ BinaryOpStub::InitializeForIsolate(this);
CompareNilICStub::InitializeForIsolate(this);
ToBooleanStub::InitializeForIsolate(this);
ArrayConstructorStubBase::InstallDescriptors(this);
InternalArrayConstructorStubBase::InstallDescriptors(this);
FastNewClosureStub::InstallDescriptors(this);
- }
-
- if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Start();
-
- if (FLAG_marking_threads > 0) {
- marking_thread_ = new MarkingThread*[FLAG_marking_threads];
- for (int i = 0; i < FLAG_marking_threads; i++) {
- marking_thread_[i] = new MarkingThread(this);
- marking_thread_[i]->Start();
- }
+ NumberToStringStub::InstallDescriptors(this);
}
if (FLAG_sweeper_threads > 0) {
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index b826ec596a..9aa14ee025 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -75,7 +75,6 @@ class HTracer;
class InlineRuntimeFunctionsTable;
class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
-class MarkingThread;
class PreallocatedMemoryThread;
class RandomNumberGenerator;
class RegExpStack;
@@ -274,10 +273,8 @@ class ThreadLocalTop BASE_EMBEDDED {
Address handler_; // try-blocks are chained through the stack
#ifdef USE_SIMULATOR
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
-#endif // USE_SIMULATOR
Address js_entry_sp_; // the stack pointer of the bottom JS entry frame
// the external callback we're currently in
@@ -308,7 +305,6 @@ class SystemThreadManager {
enum ParallelSystemComponent {
PARALLEL_SWEEPING,
CONCURRENT_SWEEPING,
- PARALLEL_MARKING,
PARALLEL_RECOMPILATION
};
@@ -497,6 +493,7 @@ class Isolate {
bool IsDefaultIsolate() const { return this == default_isolate_; }
+ static void SetCrashIfDefaultIsolateInitialized();
// Ensures that process-wide resources and the default isolate have been
// allocated. It is only necessary to call this method in rare cases, for
// example if you are using V8 from within the body of a static initializer.
@@ -753,6 +750,19 @@ class Isolate {
// Returns if the top context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
// set.
+
+ // TODO(yangguo): temporary wrappers
+ bool MayNamedAccessWrapper(Handle<JSObject> receiver,
+ Handle<Object> key,
+ v8::AccessType type) {
+ return MayNamedAccess(*receiver, *key, type);
+ }
+ bool MayIndexedAccessWrapper(Handle<JSObject> receiver,
+ uint32_t index,
+ v8::AccessType type) {
+ return MayIndexedAccess(*receiver, index, type);
+ }
+
bool MayNamedAccess(JSObject* receiver,
Object* key,
v8::AccessType type);
@@ -984,6 +994,8 @@ class Isolate {
void PreallocatedStorageDelete(void* p);
void PreallocatedStorageInit(size_t size);
+ inline bool IsCodePreAgingActive();
+
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger* debugger() {
if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
@@ -1098,7 +1110,7 @@ class Isolate {
#endif // DEBUG
OptimizingCompilerThread* optimizing_compiler_thread() {
- return &optimizing_compiler_thread_;
+ return optimizing_compiler_thread_;
}
// PreInits and returns a default isolate. Needed when a new thread tries
@@ -1106,10 +1118,6 @@ class Isolate {
// TODO(svenpanne) This method is on death row...
static v8::Isolate* GetDefaultIsolateForLocking();
- MarkingThread** marking_threads() {
- return marking_thread_;
- }
-
SweeperThread** sweeper_threads() {
return sweeper_thread_;
}
@@ -1131,13 +1139,6 @@ class Isolate {
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
- bool is_memory_constrained() const {
- return is_memory_constrained_;
- }
- void set_is_memory_constrained(bool value) {
- is_memory_constrained_ = value;
- }
-
private:
Isolate();
@@ -1310,7 +1311,6 @@ class Isolate {
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
RandomNumberGenerator* random_number_generator_;
- bool is_memory_constrained_;
// True if fatal error has been signaled for this isolate.
bool has_fatal_error_;
@@ -1368,8 +1368,7 @@ class Isolate {
#endif
DeferredHandles* deferred_handles_head_;
- OptimizingCompilerThread optimizing_compiler_thread_;
- MarkingThread** marking_thread_;
+ OptimizingCompilerThread* optimizing_compiler_thread_;
SweeperThread** sweeper_thread_;
// Counts deopt points if deopt_every_n_times is enabled.
@@ -1378,7 +1377,6 @@ class Isolate {
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
- friend class MarkingThread;
friend class OptimizingCompilerThread;
friend class SweeperThread;
friend class ThreadManager;
@@ -1426,9 +1424,9 @@ class SaveContext BASE_EMBEDDED {
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
public:
- AssertNoContextChange()
- : isolate_(Isolate::Current()),
- context_(isolate_->context()) { }
+ explicit AssertNoContextChange(Isolate* isolate)
+ : isolate_(isolate),
+ context_(isolate->context(), isolate) { }
~AssertNoContextChange() {
ASSERT(isolate_->context() == *context_);
}
@@ -1438,32 +1436,7 @@ class AssertNoContextChange BASE_EMBEDDED {
Handle<Context> context_;
#else
public:
- AssertNoContextChange() { }
-#endif
-};
-
-
-// TODO(mstarzinger): Depracate as soon as everything is handlified.
-class AssertNoContextChangeWithHandleScope BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- AssertNoContextChangeWithHandleScope() :
- isolate_(Isolate::Current()),
- scope_(isolate_),
- context_(isolate_->context(), isolate_) {
- }
-
- ~AssertNoContextChangeWithHandleScope() {
- ASSERT(isolate_->context() == *context_);
- }
-
- private:
- Isolate* isolate_;
- HandleScope scope_;
- Handle<Context> context_;
-#else
- public:
- AssertNoContextChangeWithHandleScope() { }
+ explicit AssertNoContextChange(Isolate* isolate) { }
#endif
};
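
The AssertNoContextChange rewrite above drops the implicit Isolate::Current() lookup in favor of an explicit isolate argument and retires the WithHandleScope variant entirely. A minimal standalone sketch of the debug-only RAII pattern involved (simplified stand-in types, not V8's actual classes):

    #include <cassert>

    struct Context {};
    struct Isolate {                 // hypothetical stand-in
      Context* context() const { return context_; }
      Context* context_;
    };

    // Captures the current context on entry and asserts it is unchanged
    // on exit; compiles away entirely outside DEBUG builds.
    class AssertNoContextChange {
    #ifdef DEBUG
     public:
      explicit AssertNoContextChange(Isolate* isolate)
          : isolate_(isolate), context_(isolate->context()) {}
      ~AssertNoContextChange() { assert(isolate_->context() == context_); }
     private:
      Isolate* isolate_;
      Context* context_;
    #else
     public:
      explicit AssertNoContextChange(Isolate* isolate) {}
    #endif
    };
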
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index b0e14e1965..c21e6351d4 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -181,7 +181,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
}
}
// Undefined or a callable object.
- return void 0;
+ return UNDEFINED;
}
@@ -236,5 +236,5 @@ function JSONSerializeAdapter(key, object) {
var holder = {};
holder[key] = object;
// No need to pass the actual holder since there is no replacer function.
- return JSONSerialize(key, holder, void 0, new InternalArray(), "", "");
+ return JSONSerialize(key, holder, UNDEFINED, new InternalArray(), "", "");
}
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 0e4e35bb41..41666deb26 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -84,7 +84,7 @@ class List {
// backing store (e.g. Add).
inline T& operator[](int i) const {
ASSERT(0 <= i);
- ASSERT(i < length_);
+ SLOW_ASSERT(i < length_);
return data_[i];
}
inline T& at(int i) const { return operator[](i); }
diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h
index 8cca19b2ef..deee98877d 100644
--- a/deps/v8/src/lithium-allocator-inl.h
+++ b/deps/v8/src/lithium-allocator-inl.h
@@ -145,16 +145,14 @@ void UseIterator::Advance() {
}
-void LAllocator::SetLiveRangeAssignedRegister(
- LiveRange* range,
- int reg,
- RegisterKind register_kind) {
- if (register_kind == DOUBLE_REGISTERS) {
+void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
+ if (range->Kind() == DOUBLE_REGISTERS) {
assigned_double_registers_->Add(reg);
} else {
+ ASSERT(range->Kind() == GENERAL_REGISTERS);
assigned_registers_->Add(reg);
}
- range->set_assigned_register(reg, register_kind, chunk()->zone());
+ range->set_assigned_register(reg, chunk()->zone());
}
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 3c5abd1984..29c31942e4 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -131,7 +131,7 @@ bool LiveRange::HasOverlap(UseInterval* target) const {
LiveRange::LiveRange(int id, Zone* zone)
: id_(id),
spilled_(false),
- is_double_(false),
+ kind_(UNALLOCATED_REGISTERS),
assigned_register_(kInvalidAssignment),
last_interval_(NULL),
first_interval_(NULL),
@@ -145,12 +145,9 @@ LiveRange::LiveRange(int id, Zone* zone)
spill_start_index_(kMaxInt) { }
-void LiveRange::set_assigned_register(int reg,
- RegisterKind register_kind,
- Zone* zone) {
+void LiveRange::set_assigned_register(int reg, Zone* zone) {
ASSERT(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
- is_double_ = (register_kind == DOUBLE_REGISTERS);
ConvertOperands(zone);
}
@@ -234,10 +231,15 @@ LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
LOperand* op = NULL;
if (HasRegisterAssigned()) {
ASSERT(!IsSpilled());
- if (IsDouble()) {
- op = LDoubleRegister::Create(assigned_register(), zone);
- } else {
- op = LRegister::Create(assigned_register(), zone);
+ switch (Kind()) {
+ case GENERAL_REGISTERS:
+ op = LRegister::Create(assigned_register(), zone);
+ break;
+ case DOUBLE_REGISTERS:
+ op = LDoubleRegister::Create(assigned_register(), zone);
+ break;
+ default:
+ UNREACHABLE();
}
} else if (IsSpilled()) {
ASSERT(!HasRegisterAssigned());
@@ -352,6 +354,7 @@ void LiveRange::SplitAt(LifetimePosition position,
// Link the new live range in the chain before any of the other
// ranges linked from the range before the split.
result->parent_ = (parent_ == NULL) ? this : parent_;
+ result->kind_ = result->parent_->kind_;
result->next_ = next_;
next_ = result;
@@ -553,7 +556,7 @@ LAllocator::LAllocator(int num_values, HGraph* graph)
reusable_slots_(8, zone()),
next_virtual_register_(num_values),
first_artificial_register_(num_values),
- mode_(GENERAL_REGISTERS),
+ mode_(UNALLOCATED_REGISTERS),
num_registers_(-1),
graph_(graph),
has_osr_entry_(false),
@@ -653,7 +656,8 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
if (result == NULL) {
result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
ASSERT(result->IsFixed());
- SetLiveRangeAssignedRegister(result, index, GENERAL_REGISTERS);
+ result->kind_ = GENERAL_REGISTERS;
+ SetLiveRangeAssignedRegister(result, index);
fixed_live_ranges_[index] = result;
}
return result;
@@ -667,7 +671,8 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
chunk()->zone());
ASSERT(result->IsFixed());
- SetLiveRangeAssignedRegister(result, index, DOUBLE_REGISTERS);
+ result->kind_ = DOUBLE_REGISTERS;
+ SetLiveRangeAssignedRegister(result, index);
fixed_double_live_ranges_[index] = result;
}
return result;
@@ -1375,6 +1380,12 @@ void LAllocator::BuildLiveRanges() {
}
#endif
}
+
+ for (int i = 0; i < live_ranges_.length(); ++i) {
+ if (live_ranges_[i] != NULL) {
+ live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
+ }
+ }
}
@@ -1481,6 +1492,7 @@ void LAllocator::PopulatePointerMaps() {
void LAllocator::AllocateGeneralRegisters() {
LAllocatorPhase phase("L_Allocate general registers", this);
num_registers_ = Register::NumAllocatableRegisters();
+ mode_ = GENERAL_REGISTERS;
AllocateRegisters();
}
@@ -1498,7 +1510,7 @@ void LAllocator::AllocateRegisters() {
for (int i = 0; i < live_ranges_.length(); ++i) {
if (live_ranges_[i] != NULL) {
- if (RequiredRegisterKind(live_ranges_[i]->id()) == mode_) {
+ if (live_ranges_[i]->Kind() == mode_) {
AddToUnhandledUnsorted(live_ranges_[i]);
}
}
@@ -1518,6 +1530,7 @@ void LAllocator::AllocateRegisters() {
}
}
} else {
+ ASSERT(mode_ == GENERAL_REGISTERS);
for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
LiveRange* current = fixed_live_ranges_.at(i);
if (current != NULL) {
@@ -1812,7 +1825,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
TraceAlloc("Assigning preferred reg %s to live range %d\n",
RegisterName(register_index),
current->id());
- SetLiveRangeAssignedRegister(current, register_index, mode_);
+ SetLiveRangeAssignedRegister(current, register_index);
return true;
}
}
@@ -1847,7 +1860,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
TraceAlloc("Assigning free reg %s to live range %d\n",
RegisterName(reg),
current->id());
- SetLiveRangeAssignedRegister(current, reg, mode_);
+ SetLiveRangeAssignedRegister(current, reg);
return true;
}
@@ -1932,7 +1945,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
TraceAlloc("Assigning blocked reg %s to live range %d\n",
RegisterName(reg),
current->id());
- SetLiveRangeAssignedRegister(current, reg, mode_);
+ SetLiveRangeAssignedRegister(current, reg);
// This register was not free. Thus we need to find and spill
// parts of active and inactive live regions that use the same register
@@ -2149,7 +2162,7 @@ void LAllocator::Spill(LiveRange* range) {
if (!first->HasAllocatedSpillOperand()) {
LOperand* op = TryReuseSpillSlot(range);
- if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
+ if (op == NULL) op = chunk_->GetNextSpillSlot(range->Kind());
first->SetSpillOperand(op);
}
range->MakeSpilled(chunk()->zone());
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index e5edd3cf03..9908ea823d 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -146,6 +146,7 @@ class LifetimePosition {
enum RegisterKind {
+ UNALLOCATED_REGISTERS,
GENERAL_REGISTERS,
DOUBLE_REGISTERS
};
@@ -290,9 +291,7 @@ class LiveRange: public ZoneObject {
LOperand* CreateAssignedOperand(Zone* zone);
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg,
- RegisterKind register_kind,
- Zone* zone);
+ void set_assigned_register(int reg, Zone* zone);
void MakeSpilled(Zone* zone);
// Returns use position in this live range that follows both start
@@ -323,7 +322,7 @@ class LiveRange: public ZoneObject {
// live range to the result live range.
void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
- bool IsDouble() const { return is_double_; }
+ RegisterKind Kind() const { return kind_; }
bool HasRegisterAssigned() const {
return assigned_register_ != kInvalidAssignment;
}
@@ -392,7 +391,7 @@ class LiveRange: public ZoneObject {
int id_;
bool spilled_;
- bool is_double_;
+ RegisterKind kind_;
int assigned_register_;
UseInterval* last_interval_;
UseInterval* first_interval_;
@@ -406,6 +405,8 @@ class LiveRange: public ZoneObject {
LOperand* current_hint_operand_;
LOperand* spill_operand_;
int spill_start_index_;
+
+ friend class LAllocator; // Assigns to kind_.
};
@@ -568,9 +569,7 @@ class LAllocator BASE_EMBEDDED {
HBasicBlock* block,
HBasicBlock* pred);
- inline void SetLiveRangeAssignedRegister(LiveRange* range,
- int reg,
- RegisterKind register_kind);
+ inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
// Return parallel move that should be used to connect ranges split at the
// given position.
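
The enum change above replaces the old boolean is_double_ on LiveRange: every range now carries one of three RegisterKind values, fixed once after BuildLiveRanges, so SetLiveRangeAssignedRegister can dispatch on range->Kind() instead of taking the kind as a parameter. A rough sketch of the resulting shape (illustrative names only, not the real allocator):

    enum RegisterKind {
      UNALLOCATED_REGISTERS,  // kind not decided yet
      GENERAL_REGISTERS,
      DOUBLE_REGISTERS
    };

    class RangeSketch {
     public:
      explicit RangeSketch(RegisterKind kind) : kind_(kind) {}
      RegisterKind Kind() const { return kind_; }
      void set_assigned_register(int reg) { assigned_register_ = reg; }
     private:
      RegisterKind kind_;
      int assigned_register_ = -1;
    };

    // The allocator no longer threads a RegisterKind through every call:
    void SetAssignedRegister(RangeSketch* range, int reg) {
      if (range->Kind() == DOUBLE_REGISTERS) {
        // record reg in the double-register bit vector ...
      } else {
        // ... otherwise in the general-register bit vector
      }
      range->set_assigned_register(reg);
    }
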
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc
new file mode 100644
index 0000000000..19ebe7e516
--- /dev/null
+++ b/deps/v8/src/lithium-codegen.cc
@@ -0,0 +1,150 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-codegen.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#include "ia32/lithium-codegen-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#include "x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#include "arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-mips.h"
+#include "mips/lithium-codegen-mips.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+HGraph* LCodeGenBase::graph() const {
+ return chunk()->graph();
+}
+
+
+LCodeGenBase::LCodeGenBase(LChunk* chunk,
+ MacroAssembler* assembler,
+ CompilationInfo* info)
+ : chunk_(static_cast<LPlatformChunk*>(chunk)),
+ masm_(assembler),
+ info_(info),
+ zone_(info->zone()),
+ status_(UNUSED),
+ current_block_(-1),
+ current_instruction_(-1),
+ instructions_(chunk->instructions()),
+ last_lazy_deopt_pc_(0) {
+}
+
+
+bool LCodeGenBase::GenerateBody() {
+ ASSERT(is_generating());
+ bool emit_instructions = true;
+ LCodeGen* codegen = static_cast<LCodeGen*>(this);
+ for (current_instruction_ = 0;
+ !is_aborted() && current_instruction_ < instructions_->length();
+ current_instruction_++) {
+ LInstruction* instr = instructions_->at(current_instruction_);
+
+ // Don't emit code for basic blocks with a replacement.
+ if (instr->IsLabel()) {
+ emit_instructions = !LLabel::cast(instr)->HasReplacement() &&
+ (!FLAG_unreachable_code_elimination ||
+ instr->hydrogen_value()->block()->IsReachable());
+ if (FLAG_code_comments && !emit_instructions) {
+ Comment(
+ ";;; <@%d,#%d> -------------------- B%d (unreachable/replaced) "
+ "--------------------",
+ current_instruction_,
+ instr->hydrogen_value()->id(),
+ instr->hydrogen_value()->block()->block_id());
+ }
+ }
+ if (!emit_instructions) continue;
+
+ if (FLAG_code_comments && instr->HasInterestingComment(codegen)) {
+ Comment(";;; <@%d,#%d> %s",
+ current_instruction_,
+ instr->hydrogen_value()->id(),
+ instr->Mnemonic());
+ }
+
+ GenerateBodyInstructionPre(instr);
+
+ HValue* value = instr->hydrogen_value();
+ if (value->position() != RelocInfo::kNoPosition) {
+ ASSERT(!graph()->info()->IsOptimizing() ||
+ !FLAG_emit_opt_code_positions ||
+ value->position() != RelocInfo::kNoPosition);
+ RecordAndWritePosition(value->position());
+ }
+
+ instr->CompileToNative(codegen);
+
+ GenerateBodyInstructionPost(instr);
+ }
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ return !is_aborted();
+}
+
+
+void LCodeGenBase::Comment(const char* format, ...) {
+ if (!FLAG_code_comments) return;
+ char buffer[4 * KB];
+ StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+ va_list arguments;
+ va_start(arguments, format);
+ builder.AddFormattedList(format, arguments);
+ va_end(arguments);
+
+ // Copy the string before recording it in the assembler to avoid
+ // issues when the stack allocated buffer goes out of scope.
+ size_t length = builder.position();
+ Vector<char> copy = Vector<char>::New(static_cast<int>(length) + 1);
+ OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
+ masm()->RecordComment(copy.start());
+}
+
+
+int LCodeGenBase::GetNextEmittedBlock() const {
+ for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+ if (!chunk_->GetLabel(i)->HasReplacement()) return i;
+ }
+ return -1;
+}
+
+
+} } // namespace v8::internal
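
Comment() in the new file above is worth noting: the formatted text is built in a stack buffer, then copied to freshly allocated storage before being handed to the assembler, because RecordComment retains the pointer beyond this call. The same pattern in plain C++ (stand-in RecordComment, not V8's StringBuilder/Vector types):

    #include <cstdarg>
    #include <cstdio>
    #include <cstring>

    // Stand-in for masm()->RecordComment(): it retains the pointer, so
    // callers must not hand it stack-lifetime storage.
    void RecordComment(const char* msg) { (void)msg; /* kept for later */ }

    void Comment(const char* format, ...) {
      char buffer[4096];  // stack scratch, like V8's 4 * KB buffer
      va_list args;
      va_start(args, format);
      vsnprintf(buffer, sizeof(buffer), format, args);
      va_end(args);

      // Copy to the heap before recording: the assembler keeps the
      // pointer long after this frame's stack buffer is gone.
      size_t length = strlen(buffer) + 1;
      char* copy = new char[length];
      memcpy(copy, buffer, length);
      RecordComment(copy);
    }
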
diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/lithium-codegen.h
new file mode 100644
index 0000000000..9caab8127d
--- /dev/null
+++ b/deps/v8/src/lithium-codegen.h
@@ -0,0 +1,96 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LITHIUM_CODEGEN_H_
+#define V8_LITHIUM_CODEGEN_H_
+
+#include "v8.h"
+
+#include "compiler.h"
+
+namespace v8 {
+namespace internal {
+
+class LInstruction;
+class LPlatformChunk;
+
+class LCodeGenBase BASE_EMBEDDED {
+ public:
+ LCodeGenBase(LChunk* chunk,
+ MacroAssembler* assembler,
+ CompilationInfo* info);
+ virtual ~LCodeGenBase() {}
+
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
+ Zone* zone() const { return zone_; }
+ LPlatformChunk* chunk() const { return chunk_; }
+ HGraph* graph() const;
+
+ void FPRINTF_CHECKING Comment(const char* format, ...);
+
+ bool GenerateBody();
+ virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
+ virtual void GenerateBodyInstructionPost(LInstruction* instr) {}
+
+ virtual void EnsureSpaceForLazyDeopt(int space_needed) = 0;
+ virtual void RecordAndWritePosition(int position) = 0;
+
+ int GetNextEmittedBlock() const;
+
+ protected:
+ enum Status {
+ UNUSED,
+ GENERATING,
+ DONE,
+ ABORTED
+ };
+
+ LPlatformChunk* const chunk_;
+ MacroAssembler* const masm_;
+ CompilationInfo* const info_;
+ Zone* zone_;
+ Status status_;
+ int current_block_;
+ int current_instruction_;
+ const ZoneList<LInstruction*>* instructions_;
+ int last_lazy_deopt_pc_;
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_generating() const { return status_ == GENERATING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_LITHIUM_CODEGEN_H_
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index fa837c7ede..1be4b0654b 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -229,7 +229,7 @@ void LPointerMap::PrintTo(StringStream* stream) {
if (i != 0) stream->Add(";");
pointer_operands_[i]->PrintTo(stream);
}
- stream->Add("} @%d", position());
+ stream->Add("}");
}
@@ -490,6 +490,14 @@ void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
}
+LInstruction* LChunkBuilder::CheckElideControlInstruction(
+ HControlInstruction* instr) {
+ HBasicBlock* successor;
+ if (!instr->KnownSuccessorBlock(&successor)) return NULL;
+ return new(zone()) LGoto(successor);
+}
+
+
LPhase::~LPhase() {
if (ShouldProduceTraceOutput()) {
isolate()->GetHTracer()->TraceLithium(name(), chunk_);
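
CheckElideControlInstruction above implements a small but useful fold: when KnownSuccessorBlock can prove a control instruction has exactly one reachable successor, the branch is replaced by an unconditional LGoto. A reduced sketch of the idea (simplified stand-in types):

    struct Block;
    struct Instruction { virtual ~Instruction() {} };
    struct Goto : Instruction {
      explicit Goto(Block* target) : target_(target) {}
      Block* target_;
    };

    struct ControlInstruction {
      virtual ~ControlInstruction() {}
      // True (and *succ set) when exactly one successor is possible.
      virtual bool KnownSuccessorBlock(Block** succ) const = 0;
    };

    // Returns a replacement goto when the branch is statically decided,
    // or nullptr to keep the original control instruction.
    Instruction* CheckElideControlInstruction(const ControlInstruction* instr) {
      Block* successor;
      if (!instr->KnownSuccessorBlock(&successor)) return nullptr;
      return new Goto(successor);
    }
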
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index fd50ee8f8b..4f84087835 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -476,10 +476,9 @@ class LParallelMove V8_FINAL : public ZoneObject {
class LPointerMap V8_FINAL : public ZoneObject {
public:
- explicit LPointerMap(int position, Zone* zone)
+ explicit LPointerMap(Zone* zone)
: pointer_operands_(8, zone),
untagged_operands_(0, zone),
- position_(position),
lithium_position_(-1) { }
const ZoneList<LOperand*>* GetNormalizedOperands() {
@@ -489,7 +488,6 @@ class LPointerMap V8_FINAL : public ZoneObject {
untagged_operands_.Clear();
return &pointer_operands_;
}
- int position() const { return position_; }
int lithium_position() const { return lithium_position_; }
void set_lithium_position(int pos) {
@@ -505,7 +503,6 @@ class LPointerMap V8_FINAL : public ZoneObject {
private:
ZoneList<LOperand*> pointer_operands_;
ZoneList<LOperand*> untagged_operands_;
- int position_;
int lithium_position_;
};
diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/liveedit-debugger.js
index 451b146bde..4618eda366 100644
--- a/deps/v8/src/liveedit-debugger.js
+++ b/deps/v8/src/liveedit-debugger.js
@@ -186,7 +186,7 @@ Debug.LiveEdit = new function() {
// to old version.
if (link_to_old_script_list.length == 0) {
%LiveEditReplaceScript(script, new_source, null);
- old_script = void 0;
+ old_script = UNDEFINED;
} else {
var old_script_name = CreateNameForOldScript(script);
@@ -221,7 +221,7 @@ Debug.LiveEdit = new function() {
change_log.push( {position_patched: position_patch_report} );
for (var i = 0; i < update_positions_list.length; i++) {
- // TODO(LiveEdit): take into account wether it's source_changed or
+ // TODO(LiveEdit): take into account whether it's source_changed or
// unchanged and whether positions changed at all.
PatchPositions(update_positions_list[i], diff_array,
position_patch_report);
@@ -266,7 +266,7 @@ Debug.LiveEdit = new function() {
// LiveEdit itself believe that any function in heap that points to a
// particular script is a regular function.
// For some functions we will restore this link later.
- %LiveEditFunctionSetScript(info.shared_function_info, void 0);
+ %LiveEditFunctionSetScript(info.shared_function_info, UNDEFINED);
compile_info.push(info);
old_index_map.push(i);
}
@@ -288,7 +288,7 @@ Debug.LiveEdit = new function() {
}
}
- // After sorting update outer_inder field using old_index_map. Also
+ // After sorting update outer_index field using old_index_map. Also
// set next_sibling_index field.
var current_index = 0;
@@ -542,16 +542,16 @@ Debug.LiveEdit = new function() {
this.children = children;
// an index in array of compile_info
this.array_index = array_index;
- this.parent = void 0;
+ this.parent = UNDEFINED;
this.status = FunctionStatus.UNCHANGED;
// Status explanation is used for debugging purposes and will be shown
// in user UI if some explanations are needed.
- this.status_explanation = void 0;
- this.new_start_pos = void 0;
- this.new_end_pos = void 0;
- this.corresponding_node = void 0;
- this.unmatched_new_nodes = void 0;
+ this.status_explanation = UNDEFINED;
+ this.new_start_pos = UNDEFINED;
+ this.new_end_pos = UNDEFINED;
+ this.corresponding_node = UNDEFINED;
+ this.unmatched_new_nodes = UNDEFINED;
// 'Textual' correspondence/matching is weaker than 'pure'
// correspondence/matching. We need 'textual' level for visual presentation
@@ -559,10 +559,10 @@ Debug.LiveEdit = new function() {
// Sometimes only function body is changed (functions in old and new script
// textually correspond), but we cannot patch the code, so we see them
// as an old function deleted and new function created.
- this.textual_corresponding_node = void 0;
- this.textually_unmatched_new_nodes = void 0;
+ this.textual_corresponding_node = UNDEFINED;
+ this.textually_unmatched_new_nodes = UNDEFINED;
- this.live_shared_function_infos = void 0;
+ this.live_shared_function_infos = UNDEFINED;
}
// From array of function infos that is implicitly a tree creates
@@ -692,10 +692,10 @@ Debug.LiveEdit = new function() {
ProcessInternals(code_info_tree);
}
- // For ecah old function (if it is not damaged) tries to find a corresponding
+ // For each old function (if it is not damaged) tries to find a corresponding
// function in new script. Typically it should succeed (non-damaged functions
// by definition may only have changes inside their bodies). However there are
- // reasons for corresponence not to be found; function with unmodified text
+ // reasons for correspondence not to be found; function with unmodified text
// in new script may become enclosed into other function; the innocent change
// inside function body may in fact be something like "} function B() {" that
// splits a function into 2 functions.
@@ -703,7 +703,13 @@ Debug.LiveEdit = new function() {
// A recursive function that tries to find a correspondence for all
// child functions and for their inner functions.
- function ProcessChildren(old_node, new_node) {
+ function ProcessNode(old_node, new_node) {
+ var scope_change_description =
+ IsFunctionContextLocalsChanged(old_node.info, new_node.info);
+ if (scope_change_description) {
+ old_node.status = FunctionStatus.CHANGED;
+ }
+
var old_children = old_node.children;
var new_children = new_node.children;
@@ -729,13 +735,20 @@ Debug.LiveEdit = new function() {
new_children[new_index];
old_children[old_index].textual_corresponding_node =
new_children[new_index];
- if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
- ProcessChildren(old_children[old_index],
+ if (scope_change_description) {
+ old_children[old_index].status = FunctionStatus.DAMAGED;
+ old_children[old_index].status_explanation =
+ "Enclosing function is now incompatible. " +
+ scope_change_description;
+ old_children[old_index].corresponding_node = UNDEFINED;
+ } else if (old_children[old_index].status !=
+ FunctionStatus.UNCHANGED) {
+ ProcessNode(old_children[old_index],
new_children[new_index]);
if (old_children[old_index].status == FunctionStatus.DAMAGED) {
unmatched_new_nodes_list.push(
old_children[old_index].corresponding_node);
- old_children[old_index].corresponding_node = void 0;
+ old_children[old_index].corresponding_node = UNDEFINED;
old_node.status = FunctionStatus.CHANGED;
}
}
@@ -772,11 +785,10 @@ Debug.LiveEdit = new function() {
}
if (old_node.status == FunctionStatus.CHANGED) {
- var why_wrong_expectations =
- WhyFunctionExpectationsDiffer(old_node.info, new_node.info);
- if (why_wrong_expectations) {
+ if (old_node.info.param_num != new_node.info.param_num) {
old_node.status = FunctionStatus.DAMAGED;
- old_node.status_explanation = why_wrong_expectations;
+ old_node.status_explanation = "Changed parameter number: " +
+ old_node.info.param_num + " and " + new_node.info.param_num;
}
}
old_node.unmatched_new_nodes = unmatched_new_nodes_list;
@@ -784,7 +796,7 @@ Debug.LiveEdit = new function() {
textually_unmatched_new_nodes_list;
}
- ProcessChildren(old_code_tree, new_code_tree);
+ ProcessNode(old_code_tree, new_code_tree);
old_code_tree.corresponding_node = new_code_tree;
old_code_tree.textual_corresponding_node = new_code_tree;
@@ -856,7 +868,7 @@ Debug.LiveEdit = new function() {
this.raw_array = raw_array;
}
- // Changes positions (including all statments) in function.
+ // Changes positions (including all statements) in function.
function PatchPositions(old_info_node, diff_array, report_array) {
if (old_info_node.live_shared_function_infos) {
old_info_node.live_shared_function_infos.forEach(function (info) {
@@ -878,15 +890,9 @@ Debug.LiveEdit = new function() {
return script.name + " (old)";
}
- // Compares a function interface old and new version, whether it
+ // Compares a function scope heap structure, old and new version, whether it
// changed or not. Returns explanation if they differ.
- function WhyFunctionExpectationsDiffer(function_info1, function_info2) {
- // Check that function has the same number of parameters (there may exist
- // an adapter, that won't survive function parameter number change).
- if (function_info1.param_num != function_info2.param_num) {
- return "Changed parameter number: " + function_info1.param_num +
- " and " + function_info2.param_num;
- }
+ function IsFunctionContextLocalsChanged(function_info1, function_info2) {
var scope_info1 = function_info1.scope_info;
var scope_info2 = function_info2.scope_info;
@@ -905,8 +911,8 @@ Debug.LiveEdit = new function() {
}
if (scope_info1_text != scope_info2_text) {
- return "Incompatible variable maps: [" + scope_info1_text +
- "] and [" + scope_info2_text + "]";
+ return "Variable map changed: [" + scope_info1_text +
+ "] => [" + scope_info2_text + "]";
}
// No differences. Return undefined.
return;
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index feaafd471e..3d459d4ffb 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -731,8 +731,8 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
}
- void SetOuterScopeInfo(Handle<Object> scope_info_array) {
- this->SetField(kOuterScopeInfoOffset_, scope_info_array);
+ void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
+ this->SetField(kFunctionScopeInfoOffset_, scope_info_array);
}
void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
Handle<JSValue> info_holder = WrapInJSValue(info);
@@ -771,7 +771,7 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
static const int kParamNumOffset_ = 3;
static const int kCodeOffset_ = 4;
static const int kCodeScopeInfoOffset_ = 5;
- static const int kOuterScopeInfoOffset_ = 6;
+ static const int kFunctionScopeInfoOffset_ = 6;
static const int kParentIndexOffset_ = 7;
static const int kSharedFunctionInfoOffset_ = 8;
static const int kLiteralNumOffset_ = 9;
@@ -880,7 +880,7 @@ class FunctionInfoListener {
Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone),
isolate());
- info.SetOuterScopeInfo(scope_info_list);
+ info.SetFunctionScopeInfo(scope_info_list);
}
Handle<JSArray> GetResult() { return result_; }
@@ -897,14 +897,12 @@ class FunctionInfoListener {
// Saves some description of scope. It stores name and indexes of
// variables in the whole scope chain. Null-named slots delimit
// scopes of this chain.
- Scope* outer_scope = scope->outer_scope();
- if (outer_scope == NULL) {
- return isolate()->heap()->undefined_value();
- }
- do {
- ZoneList<Variable*> stack_list(outer_scope->StackLocalCount(), zone);
- ZoneList<Variable*> context_list(outer_scope->ContextLocalCount(), zone);
- outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
+ Scope* current_scope = scope;
+ while (current_scope != NULL) {
+ ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone);
+ ZoneList<Variable*> context_list(
+ current_scope->ContextLocalCount(), zone);
+ current_scope->CollectStackAndContextLocals(&stack_list, &context_list);
context_list.Sort(&Variable::CompareIndex);
for (int i = 0; i < context_list.length(); i++) {
@@ -924,8 +922,8 @@ class FunctionInfoListener {
isolate()));
scope_info_length++;
- outer_scope = outer_scope->outer_scope();
- } while (outer_scope != NULL);
+ current_scope = current_scope->outer_scope();
+ }
return *scope_info_list;
}
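
The SerializeFunctionScope change above is subtle: the walk previously started at scope->outer_scope(), so a function's own context locals never entered the serialized description; it now starts at the function's own scope and iterates outward. A simplified model of the new walk (stand-in Scope type, empty-string delimiter instead of null-named slots):

    #include <string>
    #include <vector>

    struct Scope {                       // stand-in for v8::internal::Scope
      std::vector<std::string> locals;
      Scope* outer = nullptr;
    };

    // Flatten the whole chain, innermost scope first; an empty string
    // delimits scopes, mirroring the null-named slots in the real code.
    std::vector<std::string> SerializeScopeChain(Scope* scope) {
      std::vector<std::string> result;
      for (Scope* s = scope; s != nullptr; s = s->outer) {  // was scope->outer
        for (const std::string& name : s->locals) result.push_back(name);
        result.push_back("");
      }
      return result;
    }
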
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 0f0ad40398..b353f548fb 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -212,7 +212,7 @@ void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- Name* source, int line) {
+ Name* source, int line, int column) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(ComputeMarker(code));
name_buffer_->AppendString(shared->DebugName());
@@ -1232,10 +1232,11 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* source, int line, int column) {
- PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
+ PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line, column));
if (!is_logging_code_events()) return;
- CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, source, line));
+ CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, source, line,
+ column));
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -1610,7 +1611,12 @@ void Logger::LogCodeObject(Object* object) {
case Code::FUNCTION:
case Code::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
- case Code::BINARY_OP_IC: // fall through
+ case Code::BINARY_OP_IC: {
+ BinaryOpStub stub(code_object->extended_extra_ic_state());
+ description = stub.GetName().Detach();
+ tag = Logger::STUB_TAG;
+ break;
+ }
case Code::COMPARE_IC: // fall through
case Code::COMPARE_NIL_IC: // fall through
case Code::TO_BOOLEAN_IC: // fall through
@@ -1629,6 +1635,10 @@ void Logger::LogCodeObject(Object* object) {
description = "A builtin from the snapshot";
tag = Logger::BUILTIN_TAG;
break;
+ case Code::HANDLER:
+ description = "An IC handler from the snapshot";
+ tag = Logger::HANDLER_TAG;
+ break;
case Code::KEYED_LOAD_IC:
description = "A keyed load IC from the snapshot";
tag = Logger::KEYED_LOAD_IC_TAG;
@@ -1765,15 +1775,14 @@ void Logger::LogAccessorCallbacks() {
static void AddIsolateIdIfNeeded(Isolate* isolate, StringStream* stream) {
- if (isolate->IsDefaultIsolate()) return;
+ if (isolate->IsDefaultIsolate() || !FLAG_logfile_per_isolate) return;
stream->Add("isolate-%p-", isolate);
}
static SmartArrayPointer<const char> PrepareLogFileName(
Isolate* isolate, const char* file_name) {
- if (strchr(file_name, '%') != NULL ||
- !isolate->IsDefaultIsolate()) {
+ if (strchr(file_name, '%') != NULL || !isolate->IsDefaultIsolate()) {
// If there's a '%' in the log file name we have to expand
// placeholders.
HeapStringAllocator allocator;
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 81d45e507b..c0efd6504d 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -131,6 +131,7 @@ struct TickSample;
V(CALLBACK_TAG, "Callback") \
V(EVAL_TAG, "Eval") \
V(FUNCTION_TAG, "Function") \
+ V(HANDLER_TAG, "Handler") \
V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \
V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
@@ -470,7 +471,7 @@ class CodeEventListener {
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* source,
- int line) = 0;
+ int line, int column) = 0;
virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
int args_count) = 0;
@@ -509,7 +510,7 @@ class CodeEventLogger : public CodeEventListener {
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* source,
- int line);
+ int line, int column);
virtual void RegExpCodeCreateEvent(Code* code, String* source);
virtual void CallbackEvent(Name* name, Address entry_point) { }
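
The interface change above threads a column through every CodeCreateEvent overload alongside the existing line number; because the method is pure virtual, each implementer fails to compile until it is updated in lockstep. Reduced to its essentials (illustrative interface, not the full V8 signature):

    class CodeEventListenerSketch {
     public:
      virtual ~CodeEventListenerSketch() {}
      // Previously (name, line); the column now rides along everywhere.
      virtual void CodeCreateEvent(const char* name,
                                   int line, int column) = 0;
    };
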
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index d699c14621..1785d44a8c 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -157,6 +157,11 @@ macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber
macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
+# Constants. The compiler constant folds them.
+const NAN = $NaN;
+const INFINITY = (1/0);
+const UNDEFINED = (void 0);
+
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 263de4878f..b75ddb382b 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -38,7 +38,6 @@
#include "ic-inl.h"
#include "incremental-marking.h"
#include "mark-compact.h"
-#include "marking-thread.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"
@@ -92,10 +91,8 @@ class VerifyMarkingVisitor: public ObjectVisitor {
void VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
- rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
- !rinfo->target_object()->IsMap() ||
- !Map::cast(rinfo->target_object())->CanTransition()) {
+ if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
+ rinfo->target_object())) {
VisitPointer(rinfo->target_object_address());
}
}
@@ -408,6 +405,8 @@ void MarkCompactCollector::CollectGarbage() {
ASSERT(state_ == PREPARE_GC);
ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
+ heap()->allocation_mementos_found_ = 0;
+
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
@@ -432,9 +431,8 @@ void MarkCompactCollector::CollectGarbage() {
#endif
#ifdef VERIFY_HEAP
- if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_optimized_code &&
- heap()->weak_embedded_maps_verification_enabled()) {
- VerifyWeakEmbeddedMapsInOptimizedCode();
+ if (heap()->weak_embedded_objects_verification_enabled()) {
+ VerifyWeakEmbeddedObjectsInOptimizedCode();
}
if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
VerifyOmittedMapChecks();
@@ -450,6 +448,11 @@ void MarkCompactCollector::CollectGarbage() {
marking_parity_ = EVEN_MARKING_PARITY;
}
+ if (FLAG_trace_track_allocation_sites &&
+ heap()->allocation_mementos_found_ > 0) {
+ PrintF("AllocationMementos found during mark-sweep = %d\n",
+ heap()->allocation_mementos_found_);
+ }
tracer_ = NULL;
}
@@ -495,7 +498,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
}
-void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
+void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
HeapObjectIterator code_iterator(heap()->code_space());
for (HeapObject* obj = code_iterator.Next();
obj != NULL;
@@ -503,7 +506,7 @@ void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
Code* code = Code::cast(obj);
if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
if (WillBeDeoptimized(code)) continue;
- code->VerifyEmbeddedMapsDependency();
+ code->VerifyEmbeddedObjectsDependency();
}
}
@@ -601,20 +604,6 @@ bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
}
-void MarkCompactCollector::MarkInParallel() {
- for (int i = 0; i < FLAG_marking_threads; i++) {
- isolate()->marking_threads()[i]->StartMarking();
- }
-}
-
-
-void MarkCompactCollector::WaitUntilMarkingCompleted() {
- for (int i = 0; i < FLAG_marking_threads; i++) {
- isolate()->marking_threads()[i]->WaitForMarkingThread();
- }
-}
-
-
bool Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
ASSERT(MemoryChunk::FromAddress(old_start) ==
@@ -1481,7 +1470,7 @@ class MarkCompactMarkingVisitor
// Mark the backing hash table without pushing it on the marking stack.
Object* table_object = weak_collection->table();
if (!table_object->IsHashTable()) return;
- ObjectHashTable* table = ObjectHashTable::cast(table_object);
+ WeakHashTable* table = WeakHashTable::cast(table_object);
Object** table_slot =
HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
MarkBit table_mark = Marking::MarkBitFrom(table);
@@ -1581,13 +1570,11 @@ void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
fixed_array->map() != heap->fixed_double_array_map() &&
fixed_array != heap->empty_fixed_array()) {
if (fixed_array->IsDictionary()) {
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- dictionary_type,
- fixed_array->Size());
+ heap->RecordFixedArraySubTypeStats(dictionary_type,
+ fixed_array->Size());
} else {
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- fast_type,
- fixed_array->Size());
+ heap->RecordFixedArraySubTypeStats(fast_type,
+ fixed_array->Size());
}
}
}
@@ -1597,7 +1584,7 @@ void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
Heap* heap = map->GetHeap();
int object_size = obj->Size();
- heap->RecordObjectStats(map->instance_type(), -1, object_size);
+ heap->RecordObjectStats(map->instance_type(), object_size);
non_count_table_.GetVisitorById(id)(map, obj);
if (obj->IsJSObject()) {
JSObject* object = JSObject::cast(obj);
@@ -1630,25 +1617,20 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
if (map_obj->owns_descriptors() &&
array != heap->empty_descriptor_array()) {
int fixed_array_size = array->Size();
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- DESCRIPTOR_ARRAY_SUB_TYPE,
- fixed_array_size);
+ heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
+ fixed_array_size);
}
if (map_obj->HasTransitionArray()) {
int fixed_array_size = map_obj->transitions()->Size();
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- TRANSITION_ARRAY_SUB_TYPE,
- fixed_array_size);
+ heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
+ fixed_array_size);
}
if (map_obj->has_code_cache()) {
CodeCache* cache = CodeCache::cast(map_obj->code_cache());
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
- MAP_CODE_CACHE_SUB_TYPE,
- cache->default_cache()->Size());
+ heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
+ cache->default_cache()->Size());
if (!cache->normal_type_cache()->IsUndefined()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
+ heap->RecordFixedArraySubTypeStats(
MAP_CODE_CACHE_SUB_TYPE,
FixedArray::cast(cache->normal_type_cache())->Size());
}
@@ -1666,7 +1648,9 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
Heap* heap = map->GetHeap();
int object_size = obj->Size();
ASSERT(map->instance_type() == CODE_TYPE);
- heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size);
+ Code* code_obj = Code::cast(obj);
+ heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetAge(),
+ object_size);
ObjectStatsVisitBase(kVisitCode, map, obj);
}
};
@@ -1680,8 +1664,7 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
Heap* heap = map->GetHeap();
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
if (sfi->scope_info() != heap->empty_fixed_array()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
+ heap->RecordFixedArraySubTypeStats(
SCOPE_INFO_SUB_TYPE,
FixedArray::cast(sfi->scope_info())->Size());
}
@@ -1698,8 +1681,7 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
Heap* heap = map->GetHeap();
FixedArray* fixed_array = FixedArray::cast(obj);
if (fixed_array == heap->string_table()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
+ heap->RecordFixedArraySubTypeStats(
STRING_TABLE_SUB_TYPE,
fixed_array->Size());
}
@@ -2017,6 +1999,13 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
int size = object->Size();
survivors_size += size;
+ if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
+ if (AllocationMemento::FindForJSObject(JSObject::cast(object), true)
+ != NULL) {
+ heap()->allocation_mementos_found_++;
+ }
+ }
+
offset++;
current_cell >>= 1;
// Aggressively promote young survivors to the old space.
@@ -2116,6 +2105,8 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Handle the string table specially.
MarkStringTable(visitor);
+ MarkWeakObjectToCodeTable();
+
// There may be overflowed objects in the heap. Visit them now.
while (marking_deque_.overflowed()) {
RefillMarkingDeque();
@@ -2156,6 +2147,16 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
}
+void MarkCompactCollector::MarkWeakObjectToCodeTable() {
+ HeapObject* weak_object_to_code_table =
+ HeapObject::cast(heap()->weak_object_to_code_table());
+ if (!IsMarked(weak_object_to_code_table)) {
+ MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
+ SetMark(weak_object_to_code_table, mark);
+ }
+}
+
+
// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
@@ -2523,7 +2524,8 @@ void MarkCompactCollector::ClearNonLiveReferences() {
if (map_mark.Get()) {
ClearNonLiveDependentCode(map->dependent_code());
} else {
- ClearAndDeoptimizeDependentCode(map);
+ ClearAndDeoptimizeDependentCode(map->dependent_code());
+ map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}
}
@@ -2537,6 +2539,31 @@ void MarkCompactCollector::ClearNonLiveReferences() {
ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
}
}
+
+ if (heap_->weak_object_to_code_table()->IsHashTable()) {
+ WeakHashTable* table =
+ WeakHashTable::cast(heap_->weak_object_to_code_table());
+ uint32_t capacity = table->Capacity();
+ for (uint32_t i = 0; i < capacity; i++) {
+ uint32_t key_index = table->EntryToIndex(i);
+ Object* key = table->get(key_index);
+ if (!table->IsKey(key)) continue;
+ uint32_t value_index = table->EntryToValueIndex(i);
+ Object* value = table->get(value_index);
+ if (IsMarked(key)) {
+ if (!IsMarked(value)) {
+ HeapObject* obj = HeapObject::cast(value);
+ MarkBit mark = Marking::MarkBitFrom(obj);
+ SetMark(obj, mark);
+ }
+ ClearNonLiveDependentCode(DependentCode::cast(value));
+ } else {
+ ClearAndDeoptimizeDependentCode(DependentCode::cast(value));
+ table->set(key_index, heap_->the_hole_value());
+ table->set(value_index, heap_->the_hole_value());
+ }
+ }
+ }
}
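
The new block above sweeps the weak object-to-code table: a live key keeps its dependent-code value alive (marking it if necessary, then pruning dead entries), while a dead key has its dependent code deoptimized and both slots cleared to the hole. Roughly the same logic over a plain map (stub liveness and code-list operations, assumed for illustration):

    #include <unordered_map>

    struct CodeList {};
    // Stubs standing in for the GC's marking and dependent-code logic.
    bool IsMarked(const void* /*obj*/) { return true; }
    void ClearNonLiveDependentCode(CodeList* /*codes*/) {}
    void DeoptimizeDependentCode(CodeList* /*codes*/) {}

    void SweepWeakObjectToCodeTable(
        std::unordered_map<void*, CodeList*>* table) {
      for (auto it = table->begin(); it != table->end();) {
        if (IsMarked(it->first)) {
          ClearNonLiveDependentCode(it->second);  // live key: prune only
          ++it;
        } else {
          DeoptimizeDependentCode(it->second);    // dead key: invalidate
          it = table->erase(it);                  // real code writes the hole
        }
      }
    }
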
@@ -2602,9 +2629,9 @@ void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
}
-void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
+void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
+ DependentCode* entries) {
DisallowHeapAllocation no_allocation;
- DependentCode* entries = map->dependent_code();
DependentCode::GroupStartIndexes starts(entries);
int number_of_entries = starts.number_of_entries();
if (number_of_entries == 0) return;
@@ -2620,7 +2647,6 @@ void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
}
entries->clear_at(i);
}
- map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}
@@ -2726,10 +2752,12 @@ void MarkCompactCollector::MigrateObject(Address dst,
Address src,
int size,
AllocationSpace dest) {
- HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
- // TODO(hpayer): Replace these checks with asserts.
- CHECK(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
- CHECK(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
+ HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
+ if (heap_profiler->is_profiling()) {
+ heap_profiler->ObjectMoveEvent(src, dst, size);
+ }
+ ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
+ ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
Address src_slot = src;
Address dst_slot = dst;
@@ -2910,7 +2938,9 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
ASSERT(target_space == heap()->old_pointer_space() ||
target_space == heap()->old_data_space());
Object* result;
- MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
+ MaybeObject* maybe_result = target_space->AllocateRaw(
+ object_size,
+ PagedSpace::MOVE_OBJECT);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(),
@@ -2983,7 +3013,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
int size = object->Size();
- MaybeObject* target = space->AllocateRaw(size);
+ MaybeObject* target = space->AllocateRaw(size, PagedSpace::MOVE_OBJECT);
if (target->IsFailure()) {
// OS refused to give us memory.
V8::FatalProcessOutOfMemory("Evacuation");
@@ -3459,6 +3489,13 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
updating_visitor.VisitPointer(heap_->native_contexts_list_address());
heap_->string_table()->Iterate(&updating_visitor);
+ updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
+ if (heap_->weak_object_to_code_table()->IsHashTable()) {
+ WeakHashTable* table =
+ WeakHashTable::cast(heap_->weak_object_to_code_table());
+ table->Iterate(&updating_visitor);
+ table->Rehash(heap_->undefined_value());
+ }
// Update pointers from external string table.
heap_->UpdateReferencesInExternalStringTable(
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index df2f782113..aea5e1cf66 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -637,7 +637,7 @@ class MarkCompactCollector {
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
- void VerifyWeakEmbeddedMapsInOptimizedCode();
+ void VerifyWeakEmbeddedObjectsInOptimizedCode();
void VerifyOmittedMapChecks();
#endif
@@ -735,10 +735,9 @@ class MarkCompactCollector {
return sequential_sweeping_;
}
- // Parallel marking support.
- void MarkInParallel();
-
- void WaitUntilMarkingCompleted();
+ // Mark the global table which maps weak objects to dependent code without
+ // marking its contents.
+ void MarkWeakObjectToCodeTable();
private:
MarkCompactCollector();
@@ -889,7 +888,7 @@ class MarkCompactCollector {
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
- void ClearAndDeoptimizeDependentCode(Map* map);
+ void ClearAndDeoptimizeDependentCode(DependentCode* dependent_code);
void ClearNonLiveDependentCode(DependentCode* dependent_code);
// Marking detaches initial maps from SharedFunctionInfo objects
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index 9ba1934b85..efab63a186 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -45,59 +45,51 @@ var $Math = new MathConstructor();
// ECMA 262 - 15.8.2.1
function MathAbs(x) {
if (%_IsSmi(x)) return x >= 0 ? x : -x;
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ x = TO_NUMBER_INLINE(x);
if (x === 0) return 0; // To handle -0.
return x > 0 ? x : -x;
}
// ECMA 262 - 15.8.2.2
function MathAcos(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_acos(x);
+ return %Math_acos(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.3
function MathAsin(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_asin(x);
+ return %Math_asin(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.4
function MathAtan(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_atan(x);
+ return %Math_atan(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.5
// The naming of y and x matches the spec, as does the order in which
// ToNumber (valueOf) is called.
function MathAtan2(y, x) {
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_atan2(y, x);
+ return %Math_atan2(TO_NUMBER_INLINE(y), TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.6
function MathCeil(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_ceil(x);
+ return %Math_ceil(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.7
function MathCos(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathCos(x);
+ return %_MathCos(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.8
function MathExp(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_exp(x);
+ return %Math_exp(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.9
function MathFloor(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ x = TO_NUMBER_INLINE(x);
// It's more common to call this with a positive number that's out
// of range than negative numbers; check the upper bound first.
if (x < 0x80000000 && x > 0) {
@@ -113,16 +105,15 @@ function MathFloor(x) {
// ECMA 262 - 15.8.2.10
function MathLog(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathLog(x);
+ return %_MathLog(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
- if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
- if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
+ arg1 = TO_NUMBER_INLINE(arg1);
+ arg2 = TO_NUMBER_INLINE(arg2);
if (arg2 > arg1) return arg2;
if (arg1 > arg2) return arg1;
if (arg1 == arg2) {
@@ -131,9 +122,9 @@ function MathMax(arg1, arg2) { // length == 2
return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg2 : arg1;
}
// All comparisons failed, one of the arguments must be NaN.
- return 0/0; // Compiler constant-folds this to NaN.
+ return NAN;
}
- var r = -1/0; // Compiler constant-folds this to -Infinity.
+ var r = -INFINITY;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
@@ -151,8 +142,8 @@ function MathMax(arg1, arg2) { // length == 2
function MathMin(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
- if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
- if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
+ arg1 = TO_NUMBER_INLINE(arg1);
+ arg2 = TO_NUMBER_INLINE(arg2);
if (arg2 > arg1) return arg1;
if (arg1 > arg2) return arg2;
if (arg1 == arg2) {
@@ -161,9 +152,9 @@ function MathMin(arg1, arg2) { // length == 2
return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg1 : arg2;
}
// All comparisons failed, one of the arguments must be NaN.
- return 0/0; // Compiler constant-folds this to NaN.
+ return NAN;
}
- var r = 1/0; // Compiler constant-folds this to Infinity.
+ var r = INFINITY;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
@@ -179,9 +170,7 @@ function MathMin(arg1, arg2) { // length == 2
// ECMA 262 - 15.8.2.13
function MathPow(x, y) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- return %_MathPow(x, y);
+ return %_MathPow(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
}
// ECMA 262 - 15.8.2.14
@@ -191,33 +180,27 @@ function MathRandom() {
// ECMA 262 - 15.8.2.15
function MathRound(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %RoundNumber(x);
+ return %RoundNumber(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.16
function MathSin(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathSin(x);
+ return %_MathSin(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.17
function MathSqrt(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathSqrt(x);
+ return %_MathSqrt(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.18
function MathTan(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathTan(x);
+ return %_MathTan(TO_NUMBER_INLINE(x));
}
// Non-standard extension.
function MathImul(x, y) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- return %NumberImul(x, y);
+ return %NumberImul(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
}
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 2debbf8654..0a301228d7 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -796,7 +796,7 @@ function CallSite(receiver, fun, pos, strict_mode) {
}
function CallSiteGetThis() {
- return this[CallSiteStrictModeKey] ? void 0 : this[CallSiteReceiverKey];
+ return this[CallSiteStrictModeKey] ? UNDEFINED : this[CallSiteReceiverKey];
}
function CallSiteGetTypeName() {
@@ -826,7 +826,7 @@ function CallSiteGetScriptNameOrSourceURL() {
}
function CallSiteGetFunction() {
- return this[CallSiteStrictModeKey] ? void 0 : this[CallSiteFunctionKey];
+ return this[CallSiteStrictModeKey] ? UNDEFINED : this[CallSiteFunctionKey];
}
function CallSiteGetFunctionName() {
@@ -1092,7 +1092,7 @@ function FormatStackTrace(obj, error_string, frames) {
var array = [];
%MoveArrayContents(frames, array);
formatting_custom_stack_trace = true;
- var stack_trace = void 0;
+ var stack_trace = UNDEFINED;
try {
stack_trace = $Error.prepareStackTrace(obj, array);
} catch (e) {
@@ -1160,7 +1160,7 @@ function captureStackTrace(obj, cons_opt) {
// Turn this accessor into a data property.
%DefineOrRedefineDataProperty(obj, 'stack', result, NONE);
// Release context values.
- stack = error_string = void 0;
+ stack = error_string = UNDEFINED;
return result;
};
@@ -1171,7 +1171,7 @@ function captureStackTrace(obj, cons_opt) {
%DefineOrRedefineDataProperty(this, 'stack', v, NONE);
if (this === obj) {
// Release context values if holder is the same as the receiver.
- stack = error_string = void 0;
+ stack = error_string = UNDEFINED;
}
};
@@ -1213,7 +1213,7 @@ function SetUpError() {
// Define all the expected properties directly on the error
// object. This avoids going through getters and setters defined
// on prototype objects.
- %IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM);
+ %IgnoreAttributesAndSetProperty(this, 'stack', UNDEFINED, DONT_ENUM);
if (!IS_UNDEFINED(m)) {
%IgnoreAttributesAndSetProperty(
this, 'message', ToString(m), DONT_ENUM);
@@ -1251,7 +1251,7 @@ function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
while (error && !%HasLocalProperty(error, name)) {
error = %GetPrototype(error);
}
- if (error === null) return void 0;
+ if (IS_NULL(error)) return UNDEFINED;
if (!IS_OBJECT(error)) return error[name];
// If the property is an accessor on one of the predefined errors that can be
// generated statically by the compiler, don't touch it. This is to address
@@ -1260,11 +1260,11 @@ function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
if (desc && desc[IS_ACCESSOR_INDEX]) {
var isName = name === "name";
if (error === $ReferenceError.prototype)
- return isName ? "ReferenceError" : void 0;
+ return isName ? "ReferenceError" : UNDEFINED;
if (error === $SyntaxError.prototype)
- return isName ? "SyntaxError" : void 0;
+ return isName ? "SyntaxError" : UNDEFINED;
if (error === $TypeError.prototype)
- return isName ? "TypeError" : void 0;
+ return isName ? "TypeError" : UNDEFINED;
}
// Otherwise, read normally.
return error[name];
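
GetPropertyWithoutInvokingMonkeyGetters climbs the prototype chain via %GetPrototype until some object owns the property, precisely so that no user-installed accessor runs while an error is being formatted. A rough C++ sketch of that walk (types and names are illustrative, not V8's):

#include <map>
#include <string>

struct Obj {
  const Obj* proto = nullptr;      // illustrative prototype link
  std::map<std::string, int> own;  // own data properties only
};

// Walk to the owner of `name`, then read the slot directly so that no
// accessor (a "monkey getter") is ever invoked along the way.
static const int* Lookup(const Obj* o, const std::string& name) {
  while (o != nullptr && o->own.count(name) == 0) o = o->proto;
  if (o == nullptr) return nullptr;  // reached null: property is undefined
  return &o->own.at(name);
}

int main() {
  Obj base, derived;
  base.own["name"] = 1;
  derived.proto = &base;
  return Lookup(&derived, "name") ? 0 : 1;  // found on the prototype
}
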
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 2fa6804d19..de91051ed0 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -261,6 +261,13 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
static const int kNoCodeAgeSequenceLength = 7;
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on MIPS.
+ return Handle<Object>();
+}
+
+
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 345b642454..0972a8295c 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -48,6 +48,7 @@ bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cross_compile_ = 0;
ExternalReference ExternalReference::cpu_features() {
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index cb0896a8de..2468c3c340 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -72,18 +72,25 @@ namespace internal {
// Core register.
struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kMaxNumAllocatableRegisters = 14; // v0 through t7.
+ static const int kMaxNumAllocatableRegisters = 14; // v0 through t6 and cp.
static const int kSizeInBytes = 4;
+ static const int kCpRegister = 23; // cp (s7) has register code 23.
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
- return reg.code() - 2; // zero_reg and 'at' are skipped.
+ ASSERT((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
+ reg.is(from_code(kCpRegister)));
+ return reg.is(from_code(kCpRegister)) ?
+ kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'.
+ reg.code() - 2; // zero_reg and 'at' are skipped.
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index + 2); // zero_reg and 'at' are skipped.
+ return index == kMaxNumAllocatableRegisters - 1 ?
+ from_code(kCpRegister) : // Last index is always the 'cp' register.
+ from_code(index + 2); // zero_reg and 'at' are skipped.
}
static const char* AllocationIndexToString(int index) {
@@ -102,7 +109,7 @@ struct Register {
"t4",
"t5",
"t6",
- "t7",
+ "s7",
};
return names[index];
}
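
The new mapping keeps cp (s7) allocatable by pinning it to the last allocation index, while register codes 2 through 14 (v0 through t6) keep the simple offset-by-two scheme. A standalone sketch of the round trip, using the constants from the hunk above:

#include <cassert>

const int kMaxNumAllocatableRegisters = 14;  // v0..t6 plus cp
const int kCpRegister = 23;                  // cp is s7, register code 23

int ToAllocationIndex(int code) {
  // cp takes the last slot; everything else skips zero_reg and 'at'.
  return code == kCpRegister ? kMaxNumAllocatableRegisters - 1 : code - 2;
}

int FromAllocationIndex(int index) {
  return index == kMaxNumAllocatableRegisters - 1 ? kCpRegister : index + 2;
}

int main() {
  for (int i = 0; i < kMaxNumAllocatableRegisters; i++)
    assert(ToAllocationIndex(FromAllocationIndex(i)) == i);  // round trip
}
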
@@ -404,28 +411,49 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
- return (supported_ & (1u << f)) != 0;
+ return Check(f, supported_);
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- return (found_by_runtime_probing_only_ &
- (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
static bool IsSafeForSnapshot(CpuFeature f) {
- return (IsSupported(f) &&
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ unsigned mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
+
private:
+ static bool Check(CpuFeature f, unsigned set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static unsigned flag2set(CpuFeature f) {
+ return 1u << f;
+ }
+
#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
static unsigned found_by_runtime_probing_only_;
+ static unsigned cross_compile_;
+
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
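
The added cross_compile_ mask reuses the one-bit-per-feature encoding: flag2set maps a feature id to its bit, Check tests membership, and IsSafeForSnapshot lets a declared cross-compile target override host probing. A trimmed-down sketch of the scheme (the Serializer/runtime-probing clause is elided here):

#include <cassert>

enum Feature { kFPU = 0 };  // illustrative feature id

struct Features {
  unsigned supported = 0;
  unsigned cross_compile = 0;

  static unsigned FlagToSet(int f) { return 1u << f; }
  bool Check(int f, unsigned set) const { return (set & FlagToSet(f)) != 0; }

  bool IsSafeForSnapshot(int f) const {
    // Features declared for the cross-compile target always win; otherwise
    // fall back to what the host actually supports.
    return Check(f, cross_compile) || Check(f, supported);
  }
};

int main() {
  Features fs;
  fs.cross_compile = Features::FlagToSet(kFPU);
  assert(fs.IsSafeForSnapshot(kFPU));
}
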
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 3aabd97b97..0b495831b9 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -201,14 +201,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Register argument = a2;
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- a0, // Input.
- argument, // Result.
- a3, // Scratch.
- t0, // Scratch.
- t1, // Scratch.
- &not_cached);
+ __ LookupNumberStringCache(a0, // Input.
+ argument, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ t1, // Scratch.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
__ bind(&argument_is_string);
@@ -833,14 +831,15 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// The following registers must be saved and restored when calling through to
// the runtime:
// a0 - contains return address (beginning of patch sequence)
- // a1 - function object
+ // a1 - isolate
RegList saved_regs =
(a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a1);
+ __ PrepareCallCFunction(1, 0, a2);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ MultiPop(saved_regs);
__ Jump(a0);
}
@@ -858,6 +857,49 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+
+ __ mov(a0, ra);
+ // Adjust a0 to point to the head of the PlatformCodeAge sequence
+ __ Subu(a0, a0,
+ Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+ // Restore the original return address of the function
+ __ mov(ra, at);
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // a0 - contains return address (beginning of patch sequence)
+ // a1 - isolate
+ RegList saved_regs =
+ (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ MultiPush(saved_regs);
+ __ PrepareCallCFunction(1, 0, a2);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ __ MultiPop(saved_regs);
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ Push(ra, fp, cp, a1);
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+
+ // Jump to point after the code-age stub.
+ __ Addu(a0, a0, Operand((kNoCodeAgeSequenceLength) * Assembler::kInstrSize));
+ __ Jump(a0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -925,23 +967,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- RegList saved_regs =
- (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
- __ MultiPush(saved_regs);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ MultiPop(saved_regs);
- __ Ret();
-}
-
-
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -984,6 +1009,23 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as an indicator that recompilation might be done.
+ Label ok;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ Ret();
+}
+
+
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// a0: actual number of arguments
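
The new Generate_OsrAfterStackCheck builtin only does work when the stack pointer has dropped below the limit, taking that as the hint that recompilation may have completed. Its control flow, as a portable C++ sketch (names are illustrative, not V8 API):

#include <cstdint>

// Fast path: stack nowhere near the limit, nothing to do. Slow path: run
// the stack guard (Runtime::kStackGuard in the stub), then enter OSR.
void OsrAfterStackCheck(std::uintptr_t sp, std::uintptr_t stack_limit,
                        void (*stack_guard)(), void (*osr_builtin)()) {
  if (sp >= stack_limit) return;
  stack_guard();
  osr_builtin();  // the stub tail-jumps to OnStackReplacement here
}

static void Nop() {}

int main() {
  OsrAfterStackCheck(0x2000, 0x1000, Nop, Nop);  // above the limit: no-op
}
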
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 0589bf0162..e334b2896e 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -60,6 +60,17 @@ void ToNumberStub::InitializeInterfaceDescriptor(
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -78,7 +89,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -171,7 +182,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &a0;
+ descriptor->stack_parameter_count_ = a0;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
@@ -193,7 +204,7 @@ static void InitializeInternalArrayConstructorDescriptor(
if (constant_stack_parameter_count != 0) {
// Stack param count needs (constructor pointer, and single argument).
- descriptor->stack_parameter_count_ = &a0;
+ descriptor->stack_parameter_count_ = a0;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
@@ -536,23 +547,27 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
Register scratch3 =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
- DoubleRegister double_scratch = kLithiumScratchDouble.low();
- DoubleRegister double_input = f12;
+ DoubleRegister double_scratch = kLithiumScratchDouble;
__ Push(scratch, scratch2, scratch3);
- __ ldc1(double_input, MemOperand(input_reg, double_offset));
-
if (!skip_fastpath()) {
+ // Load double input.
+ __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
+
// Clear cumulative exception flags and save the FCSR.
__ cfc1(scratch2, FCSR);
__ ctc1(zero_reg, FCSR);
+
// Try a conversion to a signed integer.
- __ trunc_w_d(double_scratch, double_input);
+ __ Trunc_w_d(double_scratch, double_scratch);
+ // Move the converted value into the result register.
__ mfc1(result_reg, double_scratch);
+
// Retrieve and restore the FCSR.
__ cfc1(scratch, FCSR);
__ ctc1(scratch2, FCSR);
+
// Check for overflow and NaNs.
__ And(
scratch, scratch,
@@ -565,7 +580,9 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Load the double value and perform a manual truncation.
Register input_high = scratch2;
Register input_low = scratch3;
- __ Move(input_low, input_high, double_input);
+
+ __ lw(input_low, MemOperand(input_reg, double_offset));
+ __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
Label normal_exponent, restore_sign;
// Extract the biased exponent in result.
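
The slow path now reloads the two halves of the double directly from memory, low word at double_offset and high word kIntSize above it (the little-endian layout), instead of moving them out of an FPU register. The equivalent decomposition in plain C++, for reference:

#include <cstdint>
#include <cstring>

// Split an IEEE-754 double into its low mantissa word and the word holding
// the sign, biased exponent and mantissa top, as the manual truncation
// path expects them.
void SplitDouble(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);  // well-defined type pun
  *lo = static_cast<uint32_t>(bits);
  *hi = static_cast<uint32_t>(bits >> 32);
}

int main() {
  uint32_t lo, hi;
  SplitDouble(1.0, &lo, &hi);
  return (hi == 0x3ff00000 && lo == 0) ? 0 : 1;  // 1.0 is 0x3FF00000 00000000
}
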
@@ -994,105 +1011,6 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ sra(mask, mask, kSmiTagSize + 1);
- __ Addu(mask, mask, -1); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ Addu(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ lw(scratch2, MemOperand(scratch1, kPointerSize));
- __ lw(scratch1, MemOperand(scratch1, 0));
- __ Xor(scratch1, scratch1, Operand(scratch2));
- __ And(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
- __ Addu(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- __ lw(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
- __ Branch(not_found);
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ sra(scratch, object, 1); // Shift away the tag.
- __ And(scratch, mask, Operand(scratch));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch, scratch, kPointerSizeLog2 + 1);
- __ Addu(scratch, number_string_cache, scratch);
-
- // Check if the entry is the smi we are looking for.
- __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ Branch(not_found, ne, object, Operand(probe));
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ lw(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
-
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ lw(a1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, &runtime);
- __ DropAndRet(1);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
-}
-
-
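
For context on what the deleted lookup computed (the logic now lives in the MacroAssembler::LookupNumberStringCache helper): the cache is a FixedArray of (number, string) pairs, so the mask is length / 2 - 1; smis hash to their untagged value and doubles to the xor of their two 32-bit halves, per Heap::GetNumberStringCache. A sketch of just the hash:

#include <cstdint>
#include <cstring>

uint32_t HashSmi(int32_t value, uint32_t cache_length) {
  return static_cast<uint32_t>(value) & (cache_length / 2 - 1);
}

uint32_t HashDouble(double value, uint32_t cache_length) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  return (lo ^ hi) & (cache_length / 2 - 1);  // xor of upper and lower words
}

int main() {
  return HashDouble(1.5, 32) < 16 ? 0 : 1;  // index stays within the mask
}
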
static void ICCompareStub_CheckInputType(MacroAssembler* masm,
Register input,
Register scratch,
@@ -1316,958 +1234,18 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-// Generates code to call a C function to do a double operation.
-// This code never falls through, but returns with a heap number containing
-// the result in v0.
-// Register heap_number_result must be a heap number in which the
-// result of the operation will be stored.
-// Requires the following layout on entry:
-// a0: Left value (least significant part of mantissa).
-// a1: Left value (sign, exponent, top of mantissa).
-// a2: Right value (least significant part of mantissa).
-// a3: Right value (sign, exponent, top of mantissa).
-static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Assert that heap_number_result is saved.
- // We currently always use s0 to pass it.
- ASSERT(heap_number_result.is(s0));
-
- // Push the current return address before the C call.
- __ push(ra);
- __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number.
- // Double returned in register f0.
- __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- // Place heap_number_result in v0 and return to the pushed return address.
- __ pop(ra);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-}
-
-
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = true; // FPU is a base requirement for V8.
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(a1, a0);
-
- __ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ push(a2);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
- Register left = a1;
- Register right = a0;
-
- Register scratch1 = t0;
- Register scratch2 = t1;
-
- ASSERT(right.is(a0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op) {
- case Token::ADD:
- __ AdduAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::SUB:
- __ SubuAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::MUL: {
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(scratch1, right);
- // Do multiplication.
- // lo = lower 32 bits of scratch1 * left.
- // hi = higher 32 bits of scratch1 * left.
- __ Mult(left, scratch1);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mflo(scratch1);
- __ mfhi(scratch2);
- __ sra(scratch1, scratch1, 31);
- __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
- // Go slow on zero result to handle -0.
- __ mflo(v0);
- __ Ret(ne, v0, Operand(zero_reg));
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ Addu(scratch2, right, left);
- Label skip;
- // ARM uses the 'pl' condition, which is 'ge'.
- // Negating it results in 'lt'.
- __ Branch(&skip, lt, scratch2, Operand(zero_reg));
- ASSERT(Smi::FromInt(0) == 0);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
- __ bind(&skip);
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- }
- break;
- case Token::DIV: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by zero before getting the result.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the divisor (right) is
- // positive, otherwise it is a -0 case.
- // Quotient is in 'lo', remainder is in 'hi'.
- // Check for no remainder first.
- __ mfhi(scratch1);
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- __ mflo(scratch1);
- __ Branch(&done, ne, scratch1, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, scratch1);
- }
- break;
- case Token::MOD: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by 0 before calling mfhi.
- // Check for zero on the right hand side.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the dividend (left) is
- // positive (or 0), otherwise it is a -0 case.
- // Remainder is in 'hi'.
- __ mfhi(scratch2);
- __ Branch(&done, ne, scratch2, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch1, scratch2, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, scratch2);
- }
- break;
- case Token::BIT_OR:
- __ Ret(USE_DELAY_SLOT);
- __ or_(v0, left, right);
- break;
- case Token::BIT_AND:
- __ Ret(USE_DELAY_SLOT);
- __ and_(v0, left, right);
- break;
- case Token::BIT_XOR:
- __ Ret(USE_DELAY_SLOT);
- __ xor_(v0, left, right);
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ srav(scratch1, left, scratch1);
- // Smi tag result.
- __ And(v0, scratch1, ~kSmiTagMask);
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ srlv(v0, scratch1, scratch2);
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ And(scratch1, v0, Operand(0xc0000000));
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- // Smi tag result.
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0);
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ sllv(scratch1, scratch1, scratch2);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT);
- __ SmiTag(v0, scratch1); // SmiTag emits one instruction in delay slot.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
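
Several of the deleted paths test whether a 32-bit result still fits in a smi with Addu(scratch, value, 0x40000000) followed by a sign check: adding 2^30 leaves the sign bit clear exactly when the value lies in the 31-bit smi range [-2^30, 2^30). The same test, self-contained:

#include <cassert>
#include <cstdint>

bool FitsInSmi(int32_t v) {
  // The sign bit of (v + 2^30), computed with wrapping unsigned arithmetic,
  // is clear iff v is in [-2^30, 2^30), the 31-bit smi range.
  return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
}

int main() {
  assert(FitsInSmi(0) && FitsInSmi((1 << 30) - 1) && FitsInSmi(-(1 << 30)));
  assert(!FitsInSmi(1 << 30) && !FitsInSmi(INT32_MIN));
}
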
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
-
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Allocate new heap number for result.
- Register result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
- // Load left and right operands into f12 and f14.
- if (smi_operands) {
- __ SmiUntag(scratch1, a0);
- __ mtc1(scratch1, f14);
- __ cvt_d_w(f14, f14);
- __ SmiUntag(scratch1, a1);
- __ mtc1(scratch1, f12);
- __ cvt_d_w(f12, f12);
- } else {
- // Load right operand to f14.
- if (right_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- right, f14, heap_number_map, scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(right, f14, heap_number_map, scratch1, fail);
- }
- // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
- // jumps to |miss|.
- if (left_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- left, f12, heap_number_map, scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(left, f12, heap_number_map, scratch1, fail);
- }
- }
-
- // Calculate the result.
- if (op != Token::MOD) {
- // Using FPU registers:
- // f12: Left value.
- // f14: Right value.
- switch (op) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, result);
- } else {
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op, result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(a3, left);
- __ SmiUntag(a2, right);
- } else {
- // Convert operands to 32-bit integers. Right in a2 and left in a3.
- __ TruncateNumberToI(left, a3, heap_number_map, scratch1, not_numbers);
- __ TruncateNumberToI(right, a2, heap_number_map, scratch1, not_numbers);
- }
- Label result_not_a_smi;
- switch (op) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int so we go to slow case if we
- // hit this case.
- __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
- // Check that the *signed* result fits in a smi.
- __ Addu(a3, a2, Operand(0x40000000));
- __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, a2);
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = t1;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
- }
-
- // a2: Answer as signed int32.
- // t1: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to v0, which is the
- // result.
- __ mov(v0, t1);
- // Convert the int32 in a2 to the heap number in a0. As
- // mentioned above SHR needs to always produce a positive result.
- __ mtc1(a2, f0);
- if (op == Token::SHR) {
- __ Cvt_d_uw(f0, f0, f22);
- } else {
- __ cvt_d_w(f0, f0);
- }
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// Generate the smi code. If the operation on smis is successful this return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested, the code falls through. If number allocation is requested but a
-// heap number cannot be allocated, the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
- Label not_smis;
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
-
- // Perform combined smi check on both operands.
- __ Or(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch1, &not_smis);
-
- // If the smi-smi operation results in a smi, a return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
-
- // If heap number results are possible generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
- }
- __ bind(&not_smis);
-}
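
The deleted fast path checked both operands in one instruction: because kSmiTag == 0, the tag bit of (left | right) is set iff at least one operand is not a smi. In C++ terms:

#include <cstdint>

bool BothSmis(uint32_t left_tagged, uint32_t right_tagged) {
  const uint32_t kSmiTagMask = 1;  // the low bit carries the tag
  return ((left_tagged | right_tagged) & kSmiTagMask) == 0;
}

int main() {
  // Smis have a clear low bit (value << 1); 7 carries a set tag bit.
  return (BothSmis(4u, 6u) && !BothSmis(4u, 7u)) ? 0 : 1;
}
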
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ Branch(&right_arg_changed,
- ne,
- a0,
- Operand(Smi::FromInt(fixed_right_arg_value())));
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = a1;
- Register right = a0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ GetObjectType(left, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
- FPURegister double_scratch = f0;
- FPURegister single_scratch = f6;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both labels are currently bound to the same position. We use two
- // different labels to differentiate the causes leading to a type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ Or(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers a0 and a1 (right
- // and left) are preserved for the runtime call.
-
- __ LoadNumberAsInt32Double(
- right, f14, heap_number_map, scratch1, scratch2, f2, &transition);
- __ LoadNumberAsInt32Double(
- left, f12, heap_number_map, scratch1, scratch2, f2, &transition);
-
- if (op_ != Token::MOD) {
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- if (result_type_ <= BinaryOpIC::INT32) {
- Register except_flag = scratch2;
- const FPURoundingMode kRoundingMode = op_ == Token::DIV ?
- kRoundToMinusInf : kRoundToZero;
- const CheckForInexactConversion kConversion = op_ == Token::DIV ?
- kCheckForInexactConversion : kDontCheckForInexactConversion;
- __ EmitFPUTruncate(kRoundingMode,
- scratch1,
- f10,
- at,
- f16,
- except_flag,
- kConversion);
- // If except_flag != 0, result does not fit in a 32-bit integer.
- __ Branch(&transition, ne, except_flag, Operand(zero_reg));
- // Try to tag the result as a Smi, return heap number on overflow.
- __ SmiTagCheckOverflow(scratch1, scratch1, scratch2);
- __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
- // Check for minus zero, transition in that case (because we need
- // to return a heap number).
- Label not_zero;
- ASSERT(kSmiTag == 0);
- __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
- __ mfc1(scratch2, f11);
- __ And(scratch2, scratch2, HeapNumber::kSignMask);
- __ Branch(&transition, ne, scratch2, Operand(zero_reg));
- __ bind(&not_zero);
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, scratch1);
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- // We are using FPU registers so s0 is available.
- heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
- __ sdc1(f10,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- if (encoded_right_arg_.has_value) {
- __ Move(f16, fixed_right_arg_value());
- __ BranchF(&transition, NULL, ne, f14, f16);
- }
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
-
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ Branch(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- // Convert operands to 32-bit integers. Right in a2 and left in a3. The
- // registers a0 and a1 (right and left) are preserved for the runtime
- // call.
- __ LoadNumberAsInt32(
- left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition);
- __ LoadNumberAsInt32(
- right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- __ And(a2, a2, Operand(0x1f));
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- __ And(a2, a2, Operand(0x1f));
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (a2) is 0.
- // This result cannot be represented as a signed 32-bit integer, try
- // to return a heap number if we can.
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number,
- lt,
- a2,
- Operand(zero_reg));
- break;
- case Token::SHL:
- __ And(a2, a2, Operand(0x1f));
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi.
- __ Addu(scratch1, a2, Operand(0x40000000));
- // If not try to return a heap number. (We know the result is an int32.)
- __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
- // Tag the result and return.
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, a2);
-
- __ bind(&return_heap_number);
- heap_number_result = t1;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ mtc1(a2, double_scratch);
- __ cvt_d_w(double_scratch, double_scratch);
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ mtc1(a2, double_scratch);
- __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
- }
-
- // Store the result.
- __ sdc1(double_scratch,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&check, ne, a1, Operand(t0));
- if (Token::IsBitOp(op_)) {
- __ li(a1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(a1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&done, ne, a0, Operand(t0));
- if (Token::IsBitOp(op_)) {
- __ li(a0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(a0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
-
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = a1;
- Register right = a0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ GetObjectType(left, a2, a2);
- __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
- // The code below will clobber result if allocation fails. To keep both
- // arguments intact for the runtime call, result must not be one of them.
- ASSERT(!result.is(a0) && !result.is(a1));
-
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ Branch(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, overwritable_operand);
- __ bind(&allocated);
- } else {
- ASSERT(mode == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(a1, a0);
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
}
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in f4, double result goes
// into f4.
@@ -2737,6 +1715,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
}
@@ -2795,8 +1774,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (do_gc) {
// Move result passed in v0 into a0 to call PerformGC.
__ mov(a0, v0);
- __ PrepareCallCFunction(1, 0, a1);
- __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
+ __ PrepareCallCFunction(2, 0, a1);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
}
ExternalReference scope_depth =
@@ -2875,7 +1855,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(save_doubles_, s0, true);
+ __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
// Check if we should retry or throw exception.
Label retry;
@@ -3408,8 +2388,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
receiver = a0;
}
- StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
@@ -4156,7 +3135,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
DirectCEntryStub stub;
stub.GenerateCall(masm, t9);
- __ LeaveExitFrame(false, no_reg);
+ __ LeaveExitFrame(false, no_reg, true);
// v0: result
// subject: subject string (callee saved)
@@ -4424,6 +3403,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // a0 : number of arguments to the construct function
// a1 : the function to call
// a2 : cache cell for call target
Label initialize, done, miss, megamorphic, not_array_function;
@@ -4444,9 +3424,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in a3.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
__ lw(t1, FieldMemOperand(a3, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&miss, ne, t1, Operand(at));
@@ -4485,6 +3462,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
1 << 5 | // a1
1 << 6; // a2
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(a0);
__ MultiPush(kSavedRegs);
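
GenerateRecordCallTarget drives the cache cell through a one-way lattice: uninitialized, then monomorphic (a JSFunction, or an AllocationSite for the Array function), then megamorphic. As a conceptual state machine (enum names here are illustrative):

enum class CacheState { kUninitialized, kMonomorphic, kMegamorphic };

// Transitions are one-way: a cell never becomes more specific again.
CacheState Record(CacheState s, bool same_target_as_cached) {
  switch (s) {
    case CacheState::kUninitialized:
      return CacheState::kMonomorphic;
    case CacheState::kMonomorphic:
      return same_target_as_cached ? s : CacheState::kMegamorphic;
    case CacheState::kMegamorphic:
      return s;
  }
  return s;  // unreachable; keeps compilers quiet
}

int main() {
  CacheState s = Record(CacheState::kUninitialized, false);
  s = Record(s, false);  // a different target: go megamorphic
  return s == CacheState::kMegamorphic ? 0 : 1;
}
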
@@ -5803,33 +4781,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm);
- // Build a frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ CallRuntime(Runtime::kStringAdd, 2);
- }
- __ Ret();
- } else {
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm);
- // Build a frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
- }
- __ Ret();
- } else {
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -5863,13 +4819,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
__ mov(arg, scratch1);
__ sw(arg, MemOperand(sp, stack_offset));
__ bind(&done);
@@ -6222,9 +5172,16 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // No need to pop or drop anything, LeaveExitFrame will restore the old
- // stack, thus dropping the allocated space for the return value.
- // The saved ra is after the reserved stack space for the 4 args.
+ // Make room for the arguments to fit the C calling convention. Most of the
+ // callers of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame,
+ // so they handle stack restoring and we don't have to do that here.
+ // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
+ // kCArgsSlotsSize stack space after the call.
+ __ Subu(sp, sp, Operand(kCArgsSlotsSize));
+ // Place the return address on the stack, making the call
+ // GC safe. The RegExp backend also relies on this.
+ __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
+ __ Call(t9); // Call the C++ function.
__ lw(t9, MemOperand(sp, kCArgsSlotsSize));
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -6241,33 +5198,11 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
- __ Move(t9, target);
- __ AssertStackIsAligned();
- // Allocate space for arg slots.
- __ Subu(sp, sp, kCArgsSlotsSize);
-
- // Block the trampoline pool through the whole function to make sure the
- // number of generated instructions is constant.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
-
- // We need to get the current 'pc' value, which is not available on MIPS.
- Label find_ra;
- masm->bal(&find_ra); // ra = pc + 8.
- masm->nop(); // Branch delay slot nop.
- masm->bind(&find_ra);
-
- const int kNumInstructionsToJump = 6;
- masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
- // Push return address (accessible to GC through exit frame pc).
- // This spot for ra was reserved in EnterExitFrame.
- masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
intptr_t loc =
reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
- masm->li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
- // Call the function.
- masm->Jump(t9);
- // Make sure the stored 'ra' points to this position.
- ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+ __ Move(t9, target);
+ __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+ __ Call(ra);
}
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 8c9d22ae5d..10531a8002 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -268,31 +268,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
@@ -480,22 +455,6 @@ class RecordWriteStub: public PlatformCodeStub {
};
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM and MIPS.
-class RegExpCEntryStub: public PlatformCodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 5c847fc8f6..ec6649533f 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -156,8 +156,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
- masm->TestJSArrayForAllocationMemento(a2, t0, eq,
- allocation_memento_found);
+ __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found);
}
// Set transitioned map.
@@ -188,7 +187,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Register scratch = t6;
if (mode == TRACK_ALLOCATION_SITE) {
- masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail);
+ __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -316,7 +315,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label entry, loop, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail);
+ __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -540,52 +539,67 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
ASSERT(!temp2.is(temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
- Label done;
+ Label zero, infinity, done;
__ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
__ ldc1(double_scratch1, ExpConstant(0, temp3));
- __ Move(result, kDoubleRegZero);
- __ BranchF(&done, NULL, ge, double_scratch1, input);
+ __ BranchF(&zero, NULL, ge, double_scratch1, input);
+
__ ldc1(double_scratch2, ExpConstant(1, temp3));
- __ ldc1(result, ExpConstant(2, temp3));
- __ BranchF(&done, NULL, ge, input, double_scratch2);
+ __ BranchF(&infinity, NULL, ge, input, double_scratch2);
+
__ ldc1(double_scratch1, ExpConstant(3, temp3));
__ ldc1(result, ExpConstant(4, temp3));
__ mul_d(double_scratch1, double_scratch1, input);
__ add_d(double_scratch1, double_scratch1, result);
- __ Move(temp2, temp1, double_scratch1);
+ __ FmoveLow(temp2, double_scratch1);
__ sub_d(double_scratch1, double_scratch1, result);
__ ldc1(result, ExpConstant(6, temp3));
__ ldc1(double_scratch2, ExpConstant(5, temp3));
__ mul_d(double_scratch1, double_scratch1, double_scratch2);
__ sub_d(double_scratch1, double_scratch1, input);
__ sub_d(result, result, double_scratch1);
- __ mul_d(input, double_scratch1, double_scratch1);
- __ mul_d(result, result, input);
- __ srl(temp1, temp2, 11);
+ __ mul_d(double_scratch2, double_scratch1, double_scratch1);
+ __ mul_d(result, result, double_scratch2);
__ ldc1(double_scratch2, ExpConstant(7, temp3));
__ mul_d(result, result, double_scratch2);
__ sub_d(result, result, double_scratch1);
- __ ldc1(double_scratch2, ExpConstant(8, temp3));
+ // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
+ ASSERT(*reinterpret_cast<double*>
+ (ExternalReference::math_exp_constants(8).address()) == 1);
+ __ Move(double_scratch2, 1);
__ add_d(result, result, double_scratch2);
- __ li(at, 0x7ff);
- __ And(temp2, temp2, at);
+ __ srl(temp1, temp2, 11);
+ __ Ext(temp2, temp2, 0, 11);
__ Addu(temp1, temp1, Operand(0x3ff));
- __ sll(temp1, temp1, 20);
// Must not call ExpConstant() after overwriting temp3!
__ li(temp3, Operand(ExternalReference::math_exp_log_table()));
__ sll(at, temp2, 3);
- __ addu(at, at, temp3);
- __ lw(at, MemOperand(at));
- __ Addu(temp3, temp3, Operand(kPointerSize));
- __ sll(temp2, temp2, 3);
- __ addu(temp2, temp2, temp3);
- __ lw(temp2, MemOperand(temp2));
- __ Or(temp1, temp1, temp2);
- __ Move(input, at, temp1);
- __ mul_d(result, result, input);
+ __ Addu(temp3, temp3, Operand(at));
+ __ lw(temp2, MemOperand(temp3, 0));
+ __ lw(temp3, MemOperand(temp3, kPointerSize));
+ // The first word loaded is in the lower-numbered register.
+ if (temp2.code() < temp3.code()) {
+ __ sll(at, temp1, 20);
+ __ Or(temp1, temp3, at);
+ __ Move(double_scratch1, temp2, temp1);
+ } else {
+ __ sll(at, temp1, 20);
+ __ Or(temp1, temp2, at);
+ __ Move(double_scratch1, temp3, temp1);
+ }
+ __ mul_d(result, result, double_scratch1);
+ __ Branch(&done);
+
+ __ bind(&zero);
+ __ Move(result, kDoubleRegZero);
+ __ Branch(&done);
+
+ __ bind(&infinity);
+ __ ldc1(result, ExpConstant(2, temp3));
+
__ bind(&done);
}
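
The rewritten EmitMathExp is table-driven: the scaled input is split into an 11-bit table index (Ext temp2, temp2, 0, 11) and a biased exponent (srl temp1, temp2, 11 / Addu temp1, temp1, 0x3ff), and the exponent is OR-ed into the high word of the table entry before the final multiply. A minimal standalone sketch of that bit trick, assuming the math_exp_log_table entries store the significand of 2^(i/2048) with a zeroed exponent field (helper name is hypothetical):

#include <cstdint>
#include <cstring>

// Hypothetical model of the srl/Ext/Addu/sll/Or sequence above;
// `table_entry` stands for the double loaded from math_exp_log_table.
double ScaleFromTable(double table_entry, uint32_t k) {
  uint64_t bits;
  std::memcpy(&bits, &table_entry, sizeof bits);
  uint32_t exponent = (k >> 11) + 0x3ff;          // biased exponent, as in Addu
  bits |= static_cast<uint64_t>(exponent) << 52;  // sll ..., 20 on the high word
  std::memcpy(&table_entry, &bits, sizeof bits);
  return table_entry;  // 2^(k >> 11) * 2^((k & 0x7ff) / 2048)
}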
@@ -624,7 +638,7 @@ bool Code::IsYoungSequence(byte* sequence) {
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
- *age = kNoAge;
+ *age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
Address target_address = Memory::Address_at(
@@ -635,16 +649,17 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
+ if (age == kNoAgeCodeAge) {
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
// Mark this code sequence for FindPlatformCodeAgeSequence()
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
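
For orientation, the age round trip above can be modeled as: a young sequence keeps the original prologue bytes, and PatchPlatformCodeAge either restores them (kNoAgeCodeAge) or rewrites the sequence to call an age-encoding stub. A hedged stand-in sketch (the real patching goes through CodePatcher and flushes the i-cache as shown; EmitStubCall is hypothetical, not a V8 API):

#include <cstddef>
#include <cstdint>
#include <cstring>

void PatchAgeModel(uint8_t* sequence, const uint8_t* young_sequence,
                   size_t young_length, bool make_young,
                   void (*EmitStubCall)(uint8_t*)) {
  if (make_young) {
    std::memcpy(sequence, young_sequence, young_length);  // restore prologue
  } else {
    EmitStubCall(sequence);  // call the stub encoding age and marking parity
  }
}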
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 32d7d0d65c..822b94ad79 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -99,6 +99,7 @@ class StringCharLoadGenerator : public AllStatic {
class MathExpGenerator : public AllStatic {
public:
+ // Register input isn't modified. All other registers are clobbered.
static void EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 16f75b8632..d31990be5c 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -78,88 +78,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
-// The back edge bookkeeping code matches the pattern:
-//
-// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
-// beq at, zero_reg, ok
-// lui t9, <interrupt stub address> upper
-// ori t9, <interrupt stub address> lower
-// jalr t9
-// nop
-// ok-label ----- pc_after points here
-//
-// We patch the code to the following form:
-//
-// addiu at, zero_reg, 1
-// beq at, zero_reg, ok ;; Not changed
-// lui t9, <on-stack replacement address> upper
-// ori t9, <on-stack replacement address> lower
-// jalr t9 ;; Not changed
-// nop ;; Not changed
-// ok-label ----- pc_after points here
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
- CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->addiu(at, zero_reg, 1);
- // Replace the stack check address in the load-immediate (lui/ori pair)
- // with the entry address of the replacement code.
- Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Restore the sltu instruction so beq can be taken again.
- CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->slt(at, a3, zero_reg);
- // Restore the original call address.
- Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
- interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- static const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
- if (Assembler::IsAddImmediate(
- Assembler::instr_at(pc_after - 6 * kInstrSize))) {
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(osr_builtin->entry()));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -186,10 +104,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
+ int params = descriptor->environment_length();
output_frame->SetRegister(s0.code(), params);
output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
output_frame->SetRegister(s2.code(), handler);
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index df3f4170b1..cbd0788121 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -171,12 +171,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- // The following three instructions must remain together and unmodified for
- // code aging to work properly.
- __ Push(ra, fp, cp, a1);
- __ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- // Adjust fp to point to caller's fp.
- __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -1653,13 +1648,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -3613,8 +3606,9 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into a0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a0, result_register());
NumberToStringStub stub;
__ CallStub(&stub);
@@ -4926,6 +4920,83 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ Address branch_address = pc - 6 * kInstrSize;
+ CodePatcher patcher(branch_address, 1);
+
+ switch (target_state) {
+ case INTERRUPT:
+ // slt at, a3, zero_reg (in case of count based interrupts)
+ // beq at, zero_reg, ok
+ // lui t9, <interrupt stub address> upper
+ // ori t9, <interrupt stub address> lower
+ // jalr t9
+ // nop
+ // ok-label ----- pc_after points here
+ patcher.masm()->slt(at, a3, zero_reg);
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // addiu at, zero_reg, 1
+ // beq at, zero_reg, ok ;; Not changed
+ // lui t9, <on-stack replacement address> upper
+ // ori t9, <on-stack replacement address> lower
+ // jalr t9 ;; Not changed
+ // nop ;; Not changed
+ // ok-label ----- pc_after points here
+ patcher.masm()->addiu(at, zero_reg, 1);
+ break;
+ }
+ Address pc_immediate_load_address = pc - 4 * kInstrSize;
+ // Replace the stack check address in the load-immediate (lui/ori pair)
+ // with the entry address of the replacement code.
+ Assembler::set_target_address_at(pc_immediate_load_address,
+ replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_immediate_load_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ Address branch_address = pc - 6 * kInstrSize;
+ Address pc_immediate_load_address = pc - 4 * kInstrSize;
+
+ ASSERT(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize)));
+ if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->InterruptCheck()->entry()));
+ return INTERRUPT;
+ }
+
+ ASSERT(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
+
+ if (reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry()));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
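
PatchAt and GetBackEdgeState agree on a fixed six-instruction layout counting back from the ok-label that `pc` points at; only the first slot and the lui/ori immediate pair are ever rewritten. A small sketch of the address arithmetic, assuming MIPS32's fixed 4-byte instruction width:

#include <cstdint>

const int kInstrSize = 4;  // fixed-width MIPS32 instructions

// pc points at the ok-label; the patched slots sit at fixed offsets:
//   pc - 6 * kInstrSize : slt (INTERRUPT) or addiu at, zero_reg, 1 (OSR)
//   pc - 5 * kInstrSize : beq at, zero_reg, ok     (never patched)
//   pc - 4 * kInstrSize : lui t9, target_hi   <- rewritten as a pair by
//   pc - 3 * kInstrSize : ori t9, target_lo      set_target_address_at
//   pc - 2 * kInstrSize : jalr t9                (never patched)
//   pc - 1 * kInstrSize : nop (branch delay slot)
uint8_t* BranchAddress(uint8_t* pc) { return pc - 6 * kInstrSize; }
uint8_t* ImmediateLoadAddress(uint8_t* pc) { return pc - 4 * kInstrSize; }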
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index e250e0ee4a..aa2773462c 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -656,7 +656,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
@@ -1496,7 +1496,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, strict_mode,
+ Code::HANDLER, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index b37c7e0419..f54d4a5b0c 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -98,24 +98,6 @@ void LChunkBuilder::Abort(BailoutReason reason) {
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
@@ -151,21 +133,7 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ Push(ra, fp, cp);
- __ Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- } else {
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ Push(ra, fp, cp, a1);
- // Add unused nop to ensure prologue sequence is identical for
- // full-codegen and lithium-codegen.
- __ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- // Adj. FP to point to saved FP.
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- }
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
}
@@ -242,6 +210,8 @@ bool LCodeGen::GeneratePrologue() {
// Trace the call.
if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
@@ -263,45 +233,15 @@ void LCodeGen::GenerateOsrPrologue() {
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
-
- // Don't emit code for basic blocks with a replacement.
- if (instr->IsLabel()) {
- emit_instructions = !LLabel::cast(instr)->HasReplacement();
- }
- if (!emit_instructions) continue;
-
- if (FLAG_code_comments && instr->HasInterestingComment(this)) {
- Comment(";;; <@%d,#%d> %s",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->Mnemonic());
- }
-
- RecordAndUpdatePosition(instr->position());
-
- instr->CompileToNative(this);
- }
- EnsureSpaceForLazyDeopt();
- last_lazy_deopt_pc_ = masm()->pc_offset();
- return !is_aborted();
-}
-
-
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -701,10 +641,8 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ Call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}
@@ -712,20 +650,36 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
- RecordPosition(pointers->position());
- __ CallRuntime(function, num_arguments);
+ __ CallRuntime(function, num_arguments, save_doubles);
+
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Move(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ lw(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ LoadObject(cp, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
@@ -835,26 +789,31 @@ void LCodeGen::DeoptimizeIf(Condition condition,
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
@@ -950,10 +909,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
}
@@ -964,7 +919,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -986,17 +941,10 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles(
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
+ masm()->positions_recorder()->WriteRecordedPositions();
}
@@ -1046,6 +994,7 @@ void LCodeGen::DoParameter(LParameter* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
@@ -1063,11 +1012,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1408,11 +1352,11 @@ void LCodeGen::DoMulI(LMulI* instr) {
Register left = ToRegister(instr->left());
LOperand* right_op = instr->right();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (right_op->IsConstantOperand() && !can_overflow) {
+ if (right_op->IsConstantOperand()) {
int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
if (bailout_on_minus_zero && (constant < 0)) {
@@ -1423,7 +1367,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
switch (constant) {
case -1:
- __ Subu(result, zero_reg, left);
+ if (overflow) {
+ __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
+ DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ Subu(result, zero_reg, left);
+ }
break;
case 0:
if (bailout_on_minus_zero) {
@@ -1444,27 +1393,23 @@ void LCodeGen::DoMulI(LMulI* instr) {
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs) ||
- IsPowerOf2(constant_abs - 1) ||
- IsPowerOf2(constant_abs + 1)) {
- if (IsPowerOf2(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ sll(result, left, shift);
- } else if (IsPowerOf2(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ sll(scratch, left, shift);
- __ Addu(result, scratch, left);
- } else if (IsPowerOf2(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ sll(scratch, left, shift);
- __ Subu(result, scratch, left);
- }
-
- // Correct the sign of the result is the constant is negative.
- if (constant < 0) {
- __ Subu(result, zero_reg, result);
- }
-
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ sll(result, left, shift);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ sll(scratch, left, shift);
+ __ Addu(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ sll(scratch, left, shift);
+ __ Subu(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
} else {
// Generate standard code.
__ li(at, constant);
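
The restructured constant path keeps one shift per shape of |constant| and corrects the sign inside each branch. A standalone model of the non-overflow path (unsigned casts force two's-complement wraparound, an arithmetic right shift is assumed, and ctz stands in for WhichPowerOf2):

#include <cstdint>

static int ctz(uint32_t v) { int n = 0; while (!(v & 1u)) { v >>= 1; ++n; } return n; }

int32_t MulByConstant(int32_t left, int32_t constant) {
  uint32_t x = static_cast<uint32_t>(left);
  if (constant == 0) return 0;                              // explicit cases in
  if (constant == 1) return left;                           // the stub's switch
  if (constant == -1) return static_cast<int32_t>(0u - x);  // Subu path
  uint32_t uc = static_cast<uint32_t>(constant);
  uint32_t umask = static_cast<uint32_t>(constant >> 31);
  uint32_t abs_c = (uc + umask) ^ umask;          // |constant|, as in the stub
  uint32_t r;
  if ((abs_c & (abs_c - 1)) == 0) {               // |c| == 2^k     -> sll
    r = x << ctz(abs_c);
  } else if (((abs_c - 1) & (abs_c - 2)) == 0) {  // |c| == 2^k + 1 -> sll; addu
    r = (x << ctz(abs_c - 1)) + x;
  } else if (((abs_c + 1) & abs_c) == 0) {        // |c| == 2^k - 1 -> sll; subu
    r = (x << ctz(abs_c + 1)) - x;
  } else {
    return static_cast<int32_t>(x * uc);          // generic Mul path
  }
  if (constant < 0) r = 0u - r;                   // correct the sign last
  return static_cast<int32_t>(r);
}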
@@ -1473,12 +1418,10 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else {
- Register right = EmitLoadRegister(right_op, scratch);
- if (bailout_on_minus_zero) {
- __ Or(ToRegister(instr->temp()), left, right);
- }
+ ASSERT(right_op->IsRegister());
+ Register right = ToRegister(right_op);
- if (can_overflow) {
+ if (overflow) {
// hi:lo = left * right.
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1502,12 +1445,13 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (bailout_on_minus_zero) {
- // Bail out if the result is supposed to be negative zero.
Label done;
- __ Branch(&done, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt,
+ __ Xor(at, left, right);
+ __ Branch(&done, ge, at, Operand(zero_reg));
+ // Bail out if the result is minus zero.
+ DeoptimizeIf(eq,
instr->environment(),
- ToRegister(instr->temp()),
+ result,
Operand(zero_reg));
__ bind(&done);
}
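
The tightened bailout above only deoptimizes when a zero product could actually be JS -0, which requires operands of opposite sign: in JS, -4 * 0 evaluates to -0, while 4 * 0 is +0. A one-function model of the Xor/Branch/DeoptimizeIf sequence:

#include <cstdint>

// Same-sign operands can never produce -0, so only an opposite-sign zero
// result triggers the deopt.
bool NeedsMinusZeroDeopt(int32_t left, int32_t right, int32_t result) {
  if ((left ^ right) >= 0) return false;  // Branch(&done, ge, at, zero_reg)
  return result == 0;                     // DeoptimizeIf(eq, ..., result, 0)
}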
@@ -1789,33 +1733,43 @@ void LCodeGen::DoDateField(LDateField* instr) {
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
+ LOperand* index_op = instr->index();
Register value = ToRegister(instr->value());
Register scratch = scratch0();
String::Encoding encoding = instr->encoding();
if (FLAG_debug_code) {
- __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+ __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
+ __ And(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
+ __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
}
- __ Addu(scratch,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Addu(at, scratch, index);
- __ sb(value, MemOperand(at));
+ if (index_op->IsConstantOperand()) {
+ int constant_index = ToInteger32(LConstantOperand::cast(index_op));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ sb(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
+ } else {
+ __ sh(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
+ }
} else {
- __ sll(at, index, 1);
- __ Addu(at, scratch, at);
- __ sh(value, MemOperand(at));
+ Register index = ToRegister(index_op);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Addu(scratch, string, Operand(index));
+ __ sb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ } else {
+ __ sll(scratch, index, 1);
+ __ Addu(scratch, string, scratch);
+ __ sh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ }
}
}
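
Both branches of the rewritten DoSeqStringSetChar compute the same effective address; FieldMemOperand folds the -kHeapObjectTag adjustment into the displacement. A sketch of the layout, with illustrative constants standing in for V8's:

#include <cstdint>

const int kHeapObjectTag = 1;         // tagged pointers are off by one
const int kSeqStringHeaderSize = 12;  // illustrative: map + length + hash

uint8_t* CharAddress(uint8_t* tagged_string, int index, bool one_byte) {
  int width = one_byte ? 1 : 2;  // sb vs. sh above
  return tagged_string - kHeapObjectTag + kSeqStringHeaderSize + index * width;
}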
@@ -1823,6 +1777,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
void LCodeGen::DoThrow(LThrow* instr) {
Register input_reg = EmitLoadRegister(instr->value(), at);
__ push(input_reg);
+ ASSERT(ToRegister(instr->context()).is(cp));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
@@ -1974,6 +1929,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->left()).is(a1));
ASSERT(ToRegister(instr->right()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -1986,13 +1942,6 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
}
-int LCodeGen::GetNextEmittedBlock() const {
- for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
- if (!chunk_->GetLabel(i)->HasReplacement()) return i;
- }
- return -1;
-}
-
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
Condition condition,
@@ -2057,25 +2006,6 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, al, zero_reg, Operand(zero_reg));
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, al, zero_reg, Operand(zero_reg));
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ lw(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, eq, scratch0(), Operand(at));
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
@@ -2223,6 +2153,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = eq;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
case Token::LT:
cond = is_unsigned ? lo : lt;
break;
@@ -2439,6 +2373,7 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2598,6 +2533,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Label true_label, done;
ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
@@ -2708,6 +2644,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub stub(flags);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
// Get the temp register reserved by the instruction. This needs to be t0 as
// its slot of the pushing of safepoint registers is used to communicate the
@@ -2736,15 +2673,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2768,8 +2698,11 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in v0.
+ // Runtime::TraceExit returns its parameter in v0. We're leaving the code
+ // managed by the register allocator and tearing down the frame, so it's
+ // safe to write to the context register.
__ push(v0);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
@@ -2814,7 +2747,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
+ __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ lw(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2824,6 +2757,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -2840,7 +2774,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register cell = scratch0();
// Load the cell.
- __ li(cell, Operand(instr->hydrogen()->cell()));
+ __ li(cell, Operand(instr->hydrogen()->cell().handle()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -2861,6 +2795,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
@@ -2937,7 +2872,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (access.IsExternalMemory()) {
Register result = ToRegister(instr->result());
- __ lw(result, MemOperand(object, offset));
+ MemOperand operand = MemOperand(object, offset);
+ if (access.representation().IsByte()) {
+ __ lb(result, operand);
+ } else {
+ __ lw(result, operand);
+ }
return;
}
@@ -2948,16 +2888,21 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (access.IsInobject()) {
- __ lw(result, FieldMemOperand(object, offset));
- } else {
+ if (!access.IsInobject()) {
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ lw(result, FieldMemOperand(result, offset));
+ object = result;
+ }
+ MemOperand operand = FieldMemOperand(object, offset);
+ if (access.representation().IsByte()) {
+ __ lb(result, operand);
+ } else {
+ __ lw(result, operand);
}
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -3011,6 +2956,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@@ -3132,28 +3083,31 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
Register scratch = scratch0();
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int constant_key = 0;
+
+ int base_offset =
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ (instr->additional_index() << element_size_shift);
if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- } else {
- key = ToRegister(instr->key());
+ base_offset += constant_key << element_size_shift;
}
+ __ Addu(scratch, elements, Operand(base_offset));
- int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- ((constant_key + instr->additional_index()) << element_size_shift);
if (!key_is_constant) {
- __ sll(scratch, key, shift_size);
- __ Addu(elements, elements, scratch);
+ key = ToRegister(instr->key());
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ sll(at, key, shift_size);
+ __ Addu(scratch, scratch, at);
}
- __ Addu(elements, elements, Operand(base_offset));
- __ ldc1(result, MemOperand(elements));
+
+ __ ldc1(result, MemOperand(scratch));
+
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
}
}
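
The rewrite folds every compile-time-constant term (header, additional_index, and a constant key) into a single base offset added to elements once; only a register key still costs a shift-and-add at runtime. A model of the offset computation, with the header constant assumed:

// Hypothetical constant; the real value is FixedDoubleArray::kHeaderSize -
// kHeapObjectTag on the 32-bit MIPS port.
const int kDoubleArrayHeaderMinusTag = 7;

int BaseOffset(int additional_index, bool key_is_constant, int constant_key) {
  const int element_size_shift = 3;  // 8-byte doubles
  int offset = kDoubleArrayHeaderMinusTag
             + (additional_index << element_size_shift);
  if (key_is_constant) offset += constant_key << element_size_shift;
  return offset;  // a register key adds (key << shift_size) afterwards
}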
@@ -3172,7 +3126,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
instr->additional_index());
store_base = elements;
} else {
- Register key = EmitLoadRegister(instr->key(), scratch0());
+ Register key = ToRegister(instr->key());
// Even though the HLoadKeyed instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
@@ -3257,6 +3211,7 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a1));
ASSERT(ToRegister(instr->key()).is(a0));
@@ -3394,7 +3349,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is a0, as expected
@@ -3402,7 +3356,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3431,11 +3384,11 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
// If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
- for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- __ mov(result, cp);
- return;
- }
+ if (info()->IsOptimizing()) {
+ __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
}
}
@@ -3449,6 +3402,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
__ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
// The context is the first argument.
@@ -3458,8 +3412,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ lw(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
}
@@ -3482,7 +3437,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
dont_adapt_arguments || formal_parameter_count == arity;
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
if (can_invoke_directly) {
if (a1_state == A1_UNINITIALIZED) {
@@ -3512,9 +3466,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ InvokeFunction(
function, expected, count, CALL_FUNCTION, generator, call_kind);
}
-
- // Restore context.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3531,6 +3482,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3572,7 +3525,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(v0))
__ mov(tmp1, v0);
@@ -3890,6 +3844,9 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3898,6 +3855,9 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3906,6 +3866,9 @@ void LCodeGen::DoMathTan(LMathTan* instr) {
void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3914,6 +3877,9 @@ void LCodeGen::DoMathCos(LMathCos* instr) {
void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3921,17 +3887,16 @@ void LCodeGen::DoMathSin(LMathSin* instr) {
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(instr->HasPointerMap());
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3944,17 +3909,18 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallNamed(LCallNamed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
@@ -3963,23 +3929,22 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ li(a2, Operand(instr->name()));
CallCode(ic, mode, instr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
@@ -3988,7 +3953,6 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ li(a2, Operand(instr->name()));
CallCode(ic, mode, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -4004,6 +3968,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->constructor()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -4017,6 +3982,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->constructor()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -4091,7 +4057,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (access.IsExternalMemory()) {
Register value = ToRegister(instr->value());
- __ sw(value, MemOperand(object, offset));
+ MemOperand operand = MemOperand(object, offset);
+ if (representation.IsByte()) {
+ __ sb(value, operand);
+ } else {
+ __ sw(value, operand);
+ }
return;
}
@@ -4136,7 +4107,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
instr->hydrogen()->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
- __ sw(value, FieldMemOperand(object, offset));
+ MemOperand operand = FieldMemOperand(object, offset);
+ if (representation.IsByte()) {
+ __ sb(value, operand);
+ } else {
+ __ sw(value, operand);
+ }
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
@@ -4150,7 +4126,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
} else {
__ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ sw(value, FieldMemOperand(scratch, offset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ if (representation.IsByte()) {
+ __ sb(value, operand);
+ } else {
+ __ sw(value, operand);
+ }
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
@@ -4168,6 +4149,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
@@ -4241,20 +4223,25 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ Register address = scratch0();
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key <<
- element_size_shift);
+ if (constant_key != 0) {
+ __ Addu(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
} else {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), scratch0(), external_pointer);
+ __ sll(address, key, shift_size);
+ __ Addu(address, external_pointer, address);
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
+ __ swc1(double_scratch0(), MemOperand(address, additional_offset));
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ sdc1(value, MemOperand(scratch0(), additional_offset));
+ __ sdc1(value, MemOperand(address, additional_offset));
}
} else {
Register value(ToRegister(instr->value()));
@@ -4296,33 +4283,29 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
DoubleRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
- Register key = no_reg;
Register scratch = scratch0();
+ DoubleRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- Label not_nan;
+ Label not_nan, done;
// Calculate the effective address of the slot in the array to store the
// double value.
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
+ __ Addu(scratch, elements,
+ Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- if (key_is_constant) {
- __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(scratch, elements, Operand(scratch));
- __ Addu(scratch, scratch,
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ Addu(scratch, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ sll(at, ToRegister(instr->key()), shift_size);
+ __ Addu(scratch, scratch, at);
}
if (instr->NeedsCanonicalization()) {
@@ -4333,12 +4316,17 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
// Only load canonical NaN if the comparison above set the overflow.
__ bind(&is_nan);
- __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ Move(double_scratch,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
+ element_size_shift));
+ __ Branch(&done);
}
__ bind(&not_nan);
__ sdc1(value, MemOperand(scratch, instr->additional_index() <<
element_size_shift));
+ __ bind(&done);
}
@@ -4404,6 +4392,7 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a2));
ASSERT(ToRegister(instr->key()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
@@ -4436,6 +4425,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, GetRAState(), kDontSaveFPRegs);
} else {
+ ASSERT(ToRegister(instr->context()).is(cp));
PushSafepointRegistersScope scope(
this, Safepoint::kWithRegistersAndDoubles);
__ mov(a0, object_reg);
@@ -4452,14 +4442,16 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- Label fail;
- __ TestJSArrayForAllocationMemento(object, temp, ne, &fail);
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
+ ne, &no_memento_found);
DeoptimizeIf(al, instr->environment());
- __ bind(&fail);
+ __ bind(&no_memento_found);
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
StringAddStub stub(instr->hydrogen()->flags());
@@ -4514,7 +4506,8 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ instr->context());
__ AssertSmi(v0);
__ SmiUntag(v0);
__ StoreToSafepointRegisterSlot(v0, result);
@@ -4567,7 +4560,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -4707,7 +4700,15 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ StoreToSafepointRegisterSlot(zero_reg, dst);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Move(dst, v0);
__ Subu(dst, dst, kHeapObjectTag);
@@ -4763,7 +4764,15 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ mov(reg, zero_reg);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Subu(v0, v0, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(v0, reg);
}
@@ -4798,34 +4807,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
LEnvironment* env,
NumberUntagDMode mode) {
Register scratch = scratch0();
-
- Label load_smi, heap_number, done;
-
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
// Heap number map check.
__ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(ne, env, scratch, Operand(at));
+ if (can_convert_undefined_to_nan) {
+ __ Branch(&convert, ne, scratch, Operand(at));
} else {
- Label heap_number, convert;
- __ Branch(&heap_number, eq, scratch, Operand(at));
-
- // Convert undefined (and hole) to NaN.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, env, input_reg, Operand(at));
-
- __ bind(&convert);
- __ LoadRoot(at, Heap::kNanValueRootIndex);
- __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
- __ Branch(&done);
-
- __ bind(&heap_number);
+ DeoptimizeIf(ne, env, scratch, Operand(at));
}
- // Heap number to double register conversion.
+ // Load heap number.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ mfc1(at, result_reg.low());
@@ -4834,11 +4828,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, env, input_reg, Operand(at));
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ }
} else {
__ SmiUntag(scratch, input_reg);
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
-
// Smi to double register conversion
__ bind(&load_smi);
// scratch: untagged value of input_reg
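After the restructure the heap-number load is the fall-through path and the undefined-to-NaN conversion is out of line, taken only when the map check fails. In outline:

//   if (smi)                      goto load_smi;
//   if (map != HeapNumberMap)     goto convert;  // or deopt if !can_convert
//   result = heap number payload; goto done;     // (+ minus-zero check)
// convert:
//   deopt unless input == undefined;
//   result = NaN;                 goto done;
// load_smi:
//   result = (double) untagged smi;
// done: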
@@ -4870,19 +4872,32 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
if (instr->truncating()) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
- Label heap_number;
- __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map?
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ Label no_heap_number, check_bools, check_false;
+ __ Branch(&no_heap_number, ne, scratch1, Operand(at)); // HeapNumber map?
+ __ mov(scratch2, input_reg);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ Branch(&done);
+
+ // Check for oddballs. For truncating conversions, undefined and false
+ // convert to zero and true converts to one.
+ __ bind(&no_heap_number);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
+ __ Branch(&check_bools, ne, input_reg, Operand(at));
ASSERT(ToRegister(instr->result()).is(input_reg));
- __ mov(input_reg, zero_reg);
- __ Branch(&done);
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(input_reg, zero_reg); // In delay slot.
- __ bind(&heap_number);
- __ mov(scratch2, input_reg);
- __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ bind(&check_bools);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&check_false, ne, scratch2, Operand(at));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ li(input_reg, Operand(1)); // In delay slot.
+
+ __ bind(&check_false);
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(input_reg, zero_reg); // In delay slot.
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
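Behaviorally, the truncating path now matches ToInt32 for oddballs as well as heap numbers (a summary of the code above):

//   input         -> result
//   HeapNumber    -> TruncateHeapNumberToI(value)   (ToInt32 semantics)
//   undefined     -> 0
//   false         -> 0
//   true          -> 1
//   anything else -> deoptimize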
@@ -4934,14 +4949,18 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- // Let the deferred code handle the HeapObject case.
- __ JumpIfNotSmi(input_reg, deferred->entry());
+ // Let the deferred code handle the HeapObject case.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
- // Smi to int32 conversion.
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
+ // Smi to int32 conversion.
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
}
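With a known-Smi representation the deferred stub is skipped entirely; untagging is a single arithmetic shift on MIPS32, where Smis carry a one-bit tag in the low bit:

//   __ SmiUntag(input_reg);   // sra input_reg, input_reg, kSmiTagSize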
@@ -5091,7 +5110,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
@@ -5111,7 +5130,10 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(object);
- CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+ __ mov(cp, zero_reg);
+ __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ And(at, scratch0(), Operand(kSmiTagMask));
@@ -5142,7 +5164,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
@@ -5151,12 +5172,13 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
__ CompareMapAndBranch(map_reg, map, &success, eq, &success);
}
- Handle<Map> map = map_set->last();
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
// Do the CompareMap() directly within the Branch() and DeoptimizeIf().
if (instr->hydrogen()->has_migration_target()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
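The emitted map check keeps its shape under the new UniqueSet<Map> API: a chain of equality branches to success, with the last map folded into the deopt (or deferred-migration) branch. For N maps, roughly:

//   for (i = 0 .. N-2): if (map_reg == map[i]) goto success;
//   // last map:
//   if (map_reg != map[N-1]) goto deopt_or_deferred_migration;
// success: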
@@ -5309,12 +5331,15 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
+ instr->context());
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
+ instr->context());
} else {
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
+ instr->context());
}
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -5329,6 +5354,7 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Label materialized;
// Registers will be used as follows:
// t3 = literals array.
@@ -5381,6 +5407,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
@@ -5563,14 +5590,13 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
__ nop();
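The nops guarantee at least space_needed bytes between consecutive lazy-deopt patch sites, so the patcher can overwrite a full call sequence in place. Illustrative figures only (the real patch size is platform-defined):

//   space_needed == 4 * Assembler::kInstrSize          // assumed, 16 bytes
//   current_pc   == last_lazy_deopt_pc_ + kInstrSize   // gap of one insn
//   padding_size == 3 * Assembler::kInstrSize          // -> three nops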
@@ -5581,7 +5607,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5612,6 +5638,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5643,10 +5670,12 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
Label done;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&done, hs, sp, Operand(at));
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5658,7 +5687,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
new(zone()) DeferredStackCheck(this, instr);
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 84105cae35..f643d02191 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -31,6 +31,7 @@
#include "deoptimizer.h"
#include "mips/lithium-gap-resolver-mips.h"
#include "mips/lithium-mips.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -42,43 +43,26 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen V8_FINAL BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -177,31 +161,16 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk()->graph(); }
Register scratch0() { return kLithiumScratchReg; }
Register scratch1() { return kLithiumScratchReg2; }
DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
- int GetNextEmittedBlock() const;
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
@@ -214,14 +183,12 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
- void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
@@ -245,7 +212,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -254,9 +222,11 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
CallRuntime(function, num_arguments, instr);
}
+ void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ LOperand* context);
enum A1State {
A1_UNINITIALIZED,
@@ -324,8 +294,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
+
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -404,7 +374,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
LEnvironment* environment);
- void EnsureSpaceForLazyDeopt();
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -412,24 +382,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -441,8 +401,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 4dc80226f2..fb94bc3bdf 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -417,18 +417,19 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot when allocating a double-width slot.
- if (is_double) spill_slot_count_++;
+ if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
return spill_slot_count_++;
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
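Doubles occupy two word-sized slots on MIPS32, which is why the double case consumes an extra index. A worked example with illustrative indices:

//   spill_slot_count_ == 4
//   GetNextSpillIndex(GENERAL_REGISTERS)  // returns 4, count -> 5
//   GetNextSpillIndex(DOUBLE_REGISTERS)   // count -> 5, returns 5, count -> 6
//                                         // two word slots used for the double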
@@ -444,7 +445,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(false);
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
@@ -660,7 +661,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -715,51 +716,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseRegisterAtStart(right_value);
}
- } else {
- right = UseRegisterAtStart(right_value);
- }
- // Shift operations can deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
}
- }
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
@@ -768,29 +762,34 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), f2);
+ LOperand* right = UseFixedDouble(instr->right(), f4);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ // We call a C function for double modulo. It can't trigger a GC. We need
+ // to use a fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ return MarkAsCall(DefineFixedDouble(result, f2), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
}
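MIPS has no FPU remainder instruction, so Token::MOD (moved here from DoMod) goes out to a C call with pinned argument and result registers. The runtime call behaves like C fmod, whose result takes the dividend's sign:

#include <cmath>
//   std::fmod( 5.5, 2.0) ==  1.5
//   std::fmod(-5.5, 2.0) == -1.5   // JS: -5.5 % 2 == -1.5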
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left_operand = UseFixed(left, a1);
LOperand* right_operand = UseFixed(right, a0);
LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -866,9 +865,31 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ HValue* first_operand = current->OperandCount() == 0
+ ? graph()->GetConstant1()
+ : current->OperandAt(0);
+ instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(first_operand)));
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
#if DEBUG
// Make sure that the lithium instruction has either no fixed register
// constraints in temps or the result OR no uses that are only used at
@@ -898,14 +919,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -997,19 +1016,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ HValue* value = instr->value();
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
// deoptimization environment. If the instruction is generic no
@@ -1047,8 +1062,9 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LInstanceOf* result =
- new(zone()) LInstanceOf(UseFixed(instr->left(), a0),
+ new(zone()) LInstanceOf(context, UseFixed(instr->left(), a0),
UseFixed(instr->right(), a1));
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1057,18 +1073,14 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), a0),
- FixedTemp(t0));
+ new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), a0),
+ FixedTemp(t0));
return MarkAsCall(DefineFixed(result, v0), instr);
}
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -1091,7 +1103,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = Use(instr->argument());
return new(zone()) LPushArgument(argument);
}
@@ -1122,14 +1133,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- // If there is a non-return use, the context must be allocated in a register.
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- return DefineAsRegister(new(zone()) LContext);
- }
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
}
- return NULL;
+ return DefineAsRegister(new(zone()) LContext);
}
@@ -1140,7 +1150,8 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
@@ -1158,15 +1169,14 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, v0), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1221,7 +1231,7 @@ LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
+ LOperand* input = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
@@ -1240,8 +1250,12 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ Representation r = instr->value()->representation();
+ LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+ ? NULL
+ : UseFixed(instr->context(), cp);
LOperand* input = UseRegister(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(input);
+ LMathAbs* result = new(zone()) LMathAbs(context, input);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
@@ -1271,57 +1285,57 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* key = UseFixed(instr->key(), a2);
- return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), v0), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LCallKeyed(context, key), v0), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), v0), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), v0), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, v0), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), v0),
- instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LCallFunction(context, function), v0), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
}
@@ -1349,33 +1363,27 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LDivI* div = new(zone()) LDivI(dividend, divisor);
return AssignEnvironment(DefineAsRegister(div));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1466,17 +1474,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, f2),
- UseFixedDouble(right, f4));
- return MarkAsCall(DefineFixedDouble(mod, f2), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
@@ -1485,20 +1486,39 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left;
- LOperand* right = UseOrConstant(instr->BetterRightOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (instr->CheckFlag(HValue::kCanOverflow) ||
- !right->IsConstantOperand())) {
- left = UseRegister(instr->BetterLeftOperand());
- temp = TempRegister();
+ HValue* left = instr->BetterLeftOperand();
+ HValue* right = instr->BetterRightOperand();
+ LOperand* left_op;
+ LOperand* right_op;
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right->IsConstant()) {
+ HConstant* constant = HConstant::cast(right);
+ int32_t constant_value = constant->Integer32Value();
+ // Constants -1, 0 and 1 can be optimized even when the result can overflow.
+ // Other constants can be optimized only when the result cannot overflow.
+ if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+ left_op = UseRegisterAtStart(left);
+ right_op = UseConstant(right);
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
} else {
- left = UseRegisterAtStart(instr->BetterLeftOperand());
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
}
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ LMulI* mul = new(zone()) LMulI(left_op, right_op);
+ if (can_overflow || bailout_on_minus_zero) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
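The constant screening mirrors what the code generator can lower cheaply: multiplies by -1, 0 and 1 become a negate, a zero move, or a plain move, so the constant operand stays profitable even when an overflow check is required; larger constants need a register on the overflow-checking path. Sketch:

//   x *  1 -> mov          (cannot overflow)
//   x *  0 -> mov zero_reg (cannot overflow; minus-zero bailout still applies)
//   x * -1 -> negate       (overflows only for x == kMinInt)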
@@ -1579,7 +1599,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1637,9 +1656,10 @@ LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
- LCmpT* result = new(zone()) LCmpT(left, right);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1666,6 +1686,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1674,8 +1696,8 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1713,10 +1735,11 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
HStringCompareAndBranch* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
+ new(zone()) LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
}
@@ -1783,11 +1806,9 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseTempRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
}
@@ -1805,9 +1826,17 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), a0);
- return MarkAsCall(new(zone()) LThrow(value), instr);
+ return MarkAsCall(new(zone()) LThrow(context, value), instr);
}
@@ -1836,7 +1865,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
if (from.IsTagged()) {
if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1940,12 +1968,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone())
- LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -1994,8 +2016,11 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), v0),
+ return new(zone()) LReturn(UseFixed(instr->value(), v0), context,
parameter_count);
}
@@ -2028,8 +2053,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), a0);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -2045,10 +2072,11 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), a1);
LOperand* value = UseFixed(instr->value(), a0);
LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(global_object, value);
+ new(zone()) LStoreGlobalGeneric(context, global_object, value);
return MarkAsCall(result, instr);
}
@@ -2083,8 +2111,10 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), a0);
- LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), v0);
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object), v0);
return MarkAsCall(result, instr);
}
@@ -2096,6 +2126,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2112,7 +2147,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (!instr->is_external()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
- obj = UseTempRegister(instr->elements());
+ obj = UseRegister(instr->elements());
} else {
ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
@@ -2140,18 +2175,17 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), a1);
LOperand* key = UseFixed(instr->key(), a0);
LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), v0);
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), v0);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2162,14 +2196,18 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
key = UseRegisterOrConstantAtStart(instr->key());
- val = UseTempRegister(instr->value());
+ val = UseRegister(instr->value());
} else {
ASSERT(instr->value()->representation().IsSmiOrTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
}
return new(zone()) LStoreKeyed(object, key, val);
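The register tightening follows the write barrier's needs: RecordWrite saves and clobbers the registers handed to it, so when instr->NeedsWriteBarrier() holds, elements, key and value must each live in a temp that dies at this instruction; otherwise the cheaper at-start and constant uses are safe. In summary:

//   barrier needed:  object/val/key -> UseTempRegister (RecordWrite clobbers)
//   no barrier:      object/val     -> UseRegisterAtStart
//                    key            -> UseRegisterOrConstantAtStart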
@@ -2177,17 +2215,13 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
+ (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
+ (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->elements()->representation().IsExternal());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
@@ -2196,6 +2230,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), a2);
LOperand* key = UseFixed(instr->key(), a1);
LOperand* val = UseFixed(instr->value(), a0);
@@ -2204,7 +2239,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
- return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
}
@@ -2214,11 +2250,12 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg);
+ new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
return result;
} else {
+ LOperand* context = UseFixed(instr->context(), cp);
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL);
+ new(zone()) LTransitionElementsKind(object, context, NULL);
return AssignPointerMap(result);
}
}
@@ -2277,56 +2314,68 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), a1);
LOperand* val = UseFixed(instr->value(), a0);
- LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), v0),
- instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
+ instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), v0), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), v0), instr);
}
@@ -2373,8 +2422,8 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
}
@@ -2419,7 +2468,8 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), a0));
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), a0));
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -2458,10 +2508,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
@@ -2494,7 +2547,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
@@ -2506,8 +2559,9 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->enumerable(), a0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 29a8eac63f..301be8fdf2 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -105,7 +105,6 @@ class LCodeGen;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
@@ -113,13 +112,13 @@ class LCodeGen;
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
- V(IsNumberAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -213,7 +212,6 @@ class LInstruction : public ZoneObject {
: environment_(NULL),
hydrogen_value_(NULL),
bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() {}
@@ -254,15 +252,6 @@ class LInstruction : public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- // The 31 bits PositionBits is used to store the int position value. And the
- // position value may be RelocInfo::kNoPosition (-1). The accessor always
- // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
- // and can fit into the 31 bits PositionBits.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -274,7 +263,7 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return IsCall(); }
@@ -302,7 +291,6 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -401,17 +389,17 @@ class LInstructionGap V8_FINAL : public LGap {
class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual bool IsControl() const V8_OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
private:
- int block_id_;
+ HBasicBlock* block_;
};
@@ -482,8 +470,14 @@ class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -688,17 +682,15 @@ class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
@@ -782,12 +774,14 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LMathAbs(LOperand* value) {
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
inputs_[0] = value;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
@@ -936,19 +930,6 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
- public:
- explicit LIsNumberAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
-};
-
-
class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -999,15 +980,17 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
- LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
@@ -1083,15 +1066,17 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -1100,28 +1085,32 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
temps_[0] = temp;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -1142,19 +1131,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1300,7 +1276,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1355,8 +1331,8 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
@@ -1387,13 +1363,15 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ LThrow(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1489,16 +1467,21 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
: op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
virtual Opcode opcode() const V8_FINAL { return LInstruction::kArithmeticT; }
@@ -1510,11 +1493,12 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value, LOperand* parameter_count) {
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
- inputs_[1] = parameter_count;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
@@ -1526,7 +1510,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
ASSERT(has_constant_parameter_count());
return LConstantOperand::cast(parameter_count());
}
- LOperand* parameter_count() { return inputs_[1]; }
+ LOperand* parameter_count() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@@ -1545,13 +1529,15 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1573,6 +1559,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1611,15 +1606,17 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyedGeneric(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
};
@@ -1632,13 +1629,15 @@ class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1663,16 +1662,19 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
+ LStoreGlobalGeneric(LOperand* context,
+ LOperand* global_object,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ inputs_[2] = value;
}
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
@@ -1802,8 +1804,14 @@ class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
@@ -1845,13 +1853,15 @@ class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
@@ -1862,13 +1872,15 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
+ LCallKeyed(LOperand* context, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = key;
}
- LOperand* key() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
@@ -1880,8 +1892,14 @@ class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallNamed(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
@@ -1892,13 +1910,15 @@ class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1907,8 +1927,14 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallGlobal(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
@@ -1930,13 +1956,15 @@ class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1947,13 +1975,15 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNewArray(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
@@ -1964,13 +1994,24 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
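The V8_OVERRIDE above pairs with the base-class change earlier in this file that made ClobbersDoubleRegisters() virtual: a runtime call routed through the FP-saving CEntryStub no longer forces the register allocator to assume every double is clobbered. A trimmed sketch of the dispatch, with simplified stand-ins for the real classes:

    enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

    struct LInstruction {
      virtual ~LInstruction() {}
      bool IsCall() const { return true; }  // simplified: treat this node as a call
      virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
    };

    struct LCallRuntime : LInstruction {
      explicit LCallRuntime(SaveFPRegsMode mode) : save_doubles_(mode) {}
      // Doubles survive the call iff the CEntry stub saves FP registers.
      virtual bool ClobbersDoubleRegisters() const {
        return save_doubles_ == kDontSaveFPRegs;
      }
      SaveFPRegsMode save_doubles_;
    };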
@@ -2099,7 +2140,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2171,15 +2212,17 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2216,17 +2259,22 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = value;
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2237,14 +2285,17 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
+ LOperand* context,
LOperand* new_map_temp) {
inputs_[0] = object;
+ inputs_[1] = context;
temps_[0] = new_map_temp;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* object() { return inputs_[0]; }
LOperand* new_map_temp() { return temps_[0]; }
@@ -2254,8 +2305,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2277,15 +2330,17 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
@@ -2293,28 +2348,32 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
}
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
+ explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
}
- LOperand* char_code() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
@@ -2427,12 +2486,17 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
inputs_[1] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
+ LOperand* context() { return inputs_[0]; }
LOperand* size() { return inputs_[1]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
@@ -2442,15 +2506,27 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
@@ -2469,13 +2545,15 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2522,8 +2600,14 @@ class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2534,13 +2618,15 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
@@ -2596,8 +2682,8 @@ class LPlatformChunk V8_FINAL : public LChunk {
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
};
@@ -2621,6 +2707,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2753,7 +2841,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index a85b0d8034..e0cb1ba824 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -35,6 +35,7 @@
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
namespace v8 {
@@ -248,10 +249,6 @@ void MacroAssembler::RecordWrite(Register object,
SmiCheck smi_check) {
ASSERT(!AreAliased(object, address, value, t8));
ASSERT(!AreAliased(object, address, value, t9));
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
if (emit_debug_code()) {
lw(at, MemOperand(address));
@@ -3220,11 +3217,10 @@ void MacroAssembler::CopyBytes(Register src,
Register dst,
Register length,
Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+ Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
- bind(&align_loop);
- Branch(&done, eq, length, Operand(zero_reg));
+ Branch(&byte_loop, le, length, Operand(kPointerSize));
bind(&align_loop_1);
And(scratch, src, kPointerSize - 1);
Branch(&word_loop, eq, scratch, Operand(zero_reg));
@@ -3233,7 +3229,7 @@ void MacroAssembler::CopyBytes(Register src,
sb(scratch, MemOperand(dst));
Addu(dst, dst, 1);
Subu(length, length, Operand(1));
- Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+ Branch(&align_loop_1, ne, length, Operand(zero_reg));
// Copy bytes in word size chunks.
bind(&word_loop);
@@ -3847,12 +3843,14 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
}
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- Address function_address,
- ExternalReference thunk_ref,
- Register thunk_last_arg,
- int stack_space,
- int return_value_offset_from_fp) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
@@ -3915,12 +3913,13 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
}
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
// Load value from ReturnValue.
- lw(v0, MemOperand(fp, return_value_offset_from_fp*kPointerSize));
+ lw(v0, return_value_operand);
bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
@@ -3941,14 +3940,23 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
lw(t1, MemOperand(at));
Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ lw(cp, *context_restore_operand);
+ }
li(s0, Operand(stack_space));
- LeaveExitFrame(false, s0, true);
+ LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -4125,7 +4133,8 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst,
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -4142,25 +4151,11 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference(f, isolate()));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- PrepareCEntryArgs(function->nargs);
- PrepareCEntryFunction(ExternalReference(function, isolate()));
- CEntryStub stub(1, kSaveFPRegs);
+ CEntryStub stub(1, save_doubles);
CallStub(&stub);
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments,
BranchDelaySlot bd) {
@@ -4591,6 +4586,40 @@ void MacroAssembler::LoadNumberAsInt32(Register object,
}
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ Push(ra, fp, cp);
+ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ Addu(fp, sp, Operand(2 * kPointerSize));
+ } else {
+ PredictableCodeSizeScope predictible_code_size_scope(
+ this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ nop(Assembler::CODE_AGE_MARKER_NOP);
+ // Save the function's original return address
+ // (it will be clobbered by Call(t9))
+ mov(at, ra);
+ // Load the stub address to t9 and call it
+ li(t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
+ Call(t9);
+ // Record the stub address in the empty space for GetCodeAgeAndParity()
+ dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ } else {
+ Push(ra, fp, cp, a1);
+ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ // Adjust fp to point to caller's fp.
+ Addu(fp, sp, Operand(2 * kPointerSize));
+ }
+ }
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
li(t8, Operand(Smi::FromInt(type)));
@@ -4684,6 +4713,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count,
+ bool restore_context,
bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
@@ -4700,9 +4730,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
sw(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- lw(cp, MemOperand(t8));
+ if (restore_context) {
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ lw(cp, MemOperand(t8));
+ }
#ifdef DEBUG
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
sw(a3, MemOperand(t8));
#endif
@@ -4929,6 +4962,86 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ sra(mask, mask, kSmiTagSize + 1);
+ Addu(mask, mask, -1); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ Addu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ lw(scratch2, MemOperand(scratch1, kPointerSize));
+ lw(scratch1, MemOperand(scratch1, 0));
+ Xor(scratch1, scratch1, Operand(scratch2));
+ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ Addu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+ Branch(not_found);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+ sra(scratch, object, 1); // Shift away the tag.
+ And(scratch, mask, Operand(scratch));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ sll(scratch, scratch, kPointerSizeLog2 + 1);
+ Addu(scratch, number_string_cache, scratch);
+
+ // Check if the entry is the smi we are looking for.
+ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ Branch(not_found, ne, object, Operand(probe));
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+ IncrementCounter(isolate()->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
+
+
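The lw/Xor/And sequence above is the cache hash in assembly form; a standalone C++ restatement (assuming, per the comment, that it must agree with Heap::GetNumberStringCache):

    #include <cstdint>
    #include <cstring>

    // cache_length is the untagged FixedArray length; each entry is two words.
    uint32_t NumberStringCacheHash(double value, uint32_t cache_length) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // the two lw instructions
      uint32_t lo = static_cast<uint32_t>(bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      uint32_t mask = (cache_length / 2) - 1;    // sra ...; Addu(mask, mask, -1)
      return (lo ^ hi) & mask;                   // Xor, then And
    }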
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@@ -5492,23 +5605,24 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg,
+ Label* no_memento_found,
Condition cond,
Label* allocation_memento_present) {
- Label no_memento_available;
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
Addu(scratch_reg, receiver_reg,
Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- Branch(&no_memento_available, lt, scratch_reg, Operand(new_space_start));
+ Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
li(at, Operand(new_space_allocation_top));
lw(at, MemOperand(at));
- Branch(&no_memento_available, gt, scratch_reg, Operand(at));
+ Branch(no_memento_found, gt, scratch_reg, Operand(at));
lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
- Branch(allocation_memento_present, cond, scratch_reg,
- Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
- bind(&no_memento_available);
+ if (allocation_memento_present) {
+ Branch(allocation_memento_present, cond, scratch_reg,
+ Operand(isolate()->factory()->allocation_memento_map()));
+ }
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 75ded88490..0805bb9670 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -51,6 +51,12 @@ class JumpTarget;
// MIPS generated code calls C code, it must be via t9 register.
+// Flags used for LeaveExitFrame function.
+enum LeaveExitFrameMode {
+ EMIT_RETURN = true,
+ NO_EMIT_RETURN = false
+};
+
// Flags used for AllocateHeapNumber
enum TaggingMode {
// Tag the result.
@@ -848,7 +854,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame.
void LeaveExitFrame(bool save_doubles,
Register arg_count,
- bool do_return = false);
+ bool restore_context,
+ bool do_return = NO_EMIT_RETURN);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -1194,11 +1201,18 @@ class MacroAssembler: public Assembler {
void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ }
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1271,7 +1285,8 @@ class MacroAssembler: public Assembler {
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- int return_value_offset_from_fp);
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
@@ -1419,6 +1434,18 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// String utilities.
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache, the generated code falls
+ // through with the result in the result register. The object and the result
+ // register can be the same. If the number is not found in the cache, the
+ // code jumps to the label not_found, and only the content of register
+ // object is left unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
// Checks if both instance types are sequential ASCII strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialAscii(
@@ -1471,6 +1498,9 @@ class MacroAssembler: public Assembler {
And(reg, reg, Operand(mask));
}
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
@@ -1493,11 +1523,22 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
- // If allocation info is present, jump to allocation_info_present
- void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Condition cond,
- Label* allocation_memento_present);
+ // If allocation info is present, jump to allocation_memento_present.
+ void TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found,
+ Condition cond = al,
+ Label* allocation_memento_present = NULL);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found, eq, memento_found);
+ bind(&no_memento_found);
+ }
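The refactored tester inverts the old contract: the caller now owns the no_memento_found label, and the match branch is optional (cond defaults to al, the label to NULL), which is what lets the inline wrapper above exist at all. A hypothetical direct call site (illustrative only):

    Label no_memento, do_transition;
    __ TestJSArrayForAllocationMemento(receiver, scratch, &no_memento,
                                       eq, &do_transition);
    __ bind(&no_memento);  // fall through on the fast path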
private:
void CallCFunctionHelper(Register function,
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 1a04fd1029..49dec3c024 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -1063,15 +1063,56 @@ bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
+ int stack_alignment = OS::ActivationFrameAlignment();
+
+ // Align the stack pointer and save the original sp value on the stack.
+ __ mov(scratch, sp);
+ __ Subu(sp, sp, Operand(kPointerSize));
+ ASSERT(IsPowerOf2(stack_alignment));
+ __ And(sp, sp, Operand(-stack_alignment));
+ __ sw(scratch, MemOperand(sp));
+
__ mov(a2, frame_pointer());
// Code* of self.
__ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
- // a0 becomes return address pointer.
+
+ // We need to make room for the return address on the stack.
+ ASSERT(IsAligned(stack_alignment, kPointerSize));
+ __ Subu(sp, sp, Operand(stack_alignment));
+
+ // Stack pointer now points to cell where return address is to be written.
+ // Arguments are in registers, meaning we teat the return address as
+ // argument 5. Since DirectCEntryStub will handleallocating space for the C
+ // argument slots, we don't need to care about that here. This is how the
+ // stack will look (sp meaning the value of sp at this moment):
+ // [sp + 3] - empty slot if needed for alignment.
+ // [sp + 2] - saved sp.
+ // [sp + 1] - second word reserved for return value.
+ // [sp + 0] - first word reserved for return value.
+
+ // a0 will point to the return address, placed by DirectCEntry.
+ __ mov(a0, sp);
+
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(masm_->isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
+ __ li(t9, Operand(stack_guard_check));
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm_, t9);
+
+ // DirectCEntryStub allocated space for the C argument slots, so we have to
+ // drop them, together with the return address, by loading the saved sp from
+ // the stack. At this point the stack must look like this:
+ // [sp + 7] - empty slot if needed for alignment.
+ // [sp + 6] - saved sp.
+ // [sp + 5] - second word reserved for return value.
+ // [sp + 4] - first word reserved for return value.
+ // [sp + 3] - C argument slot.
+ // [sp + 2] - C argument slot.
+ // [sp + 1] - C argument slot.
+ // [sp + 0] - C argument slot.
+ __ lw(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
+
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
}
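The And(sp, sp, Operand(-stack_alignment)) line is a power-of-two round-down; the same arithmetic in standalone C++ (sketch only):

    #include <cassert>
    #include <cstdint>

    uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
      assert((alignment & (alignment - 1)) == 0);  // mirrors IsPowerOf2(...)
      return sp & ~(alignment - 1);                // -alignment in two's complement
    }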
@@ -1276,21 +1317,6 @@ void RegExpMacroAssemblerMIPS::CheckStackLimit() {
}
-void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ li(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ lw(sp, MemOperand(sp, 16));
- }
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
-}
-
-
void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
@@ -1312,23 +1338,6 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
}
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the return address.
- __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
- const int return_address_offset = kCArgsSlotsSize;
- __ Addu(a0, sp, return_address_offset);
- __ sw(ra, MemOperand(a0, 0));
- __ mov(t9, t1);
- __ Call(t9);
- __ lw(ra, MemOperand(sp, return_address_offset));
- __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
- __ Jump(ra);
-}
-
-
#undef __
#endif // V8_INTERPRETED_REGEXP
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h
index 86ae4d45ee..063582c648 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h
@@ -217,14 +217,6 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// and increments it by a word size.
inline void Pop(Register target);
- // Calls a C function and cleans up the frame alignment done by
- // by FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index ea8b65948a..5a96efe9c1 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -2274,9 +2274,13 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case DIV:
// Divide by zero and overflow were not checked in the configuration
- // step - div and divu do not raise exceptions. On division by 0 and
- // on overflow (INT_MIN/-1), the result will be UNPREDICTABLE.
- if (rt != 0 && !(rs == INT_MIN && rt == -1)) {
+ // step - div and divu do not raise exceptions. On division by 0
+ // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
+ // return INT_MIN which is what the hardware does.
+ if (rs == INT_MIN && rt == -1) {
+ set_register(LO, INT_MIN);
+ set_register(HI, 0);
+ } else if (rt != 0) {
set_register(LO, rs / rt);
set_register(HI, rs % rt);
}
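The special case matters because INT_MIN / -1 overflows a 32-bit quotient; real MIPS hardware does not trap and yields INT_MIN, while performing the division in host C++ would be undefined behavior. A standalone sketch of the corrected logic:

    #include <climits>
    #include <cstdint>

    void SimulateDiv(int32_t rs, int32_t rt, int32_t* lo, int32_t* hi) {
      if (rs == INT_MIN && rt == -1) {
        *lo = INT_MIN;  // quotient wraps, matching the hardware
        *hi = 0;        // remainder is zero
      } else if (rt != 0) {
        *lo = rs / rt;
        *hi = rs % rt;
      }  // rt == 0: result is UNPREDICTABLE; LO/HI left untouched
    }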
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 58452cad1b..471c25ef82 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -374,30 +374,26 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
__ Ret(USE_DELAY_SLOT);
__ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
- // Unwrap the value and check if the wrapped value is a string.
- __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
- }
+ // Unwrap the value and check if the wrapped value is a string.
+ __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
}
@@ -429,7 +425,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -448,19 +444,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
// Generate StoreTransition code, value is passed in a0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
-void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss_label,
- Label* slow) {
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
// a0 : value.
Label exit;
@@ -612,15 +608,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
-void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// a0 : value
Label exit;
@@ -733,9 +729,9 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ li(this->name(), Operand(name));
@@ -833,25 +829,26 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : isolate
- // -- sp[16] : ReturnValue default value
- // -- sp[20] : ReturnValue
- // -- sp[24] : last JS argument
+ // -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
+ // : holder (set by CheckPrototypes)
+ // -- sp[28] : last JS argument
// -- ...
- // -- sp[(argc + 5) * 4] : first JS argument
- // -- sp[(argc + 6) * 4] : receiver
+ // -- sp[(argc + 6) * 4] : first JS argument
+ // -- sp[(argc + 7) * 4] : receiver
// -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+ // Save calling context.
+ __ sw(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(t1, function);
__ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
+ __ sw(t1, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
- // Pass the additional arguments.
+ // Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
@@ -860,18 +857,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
} else {
__ li(t2, call_data);
}
-
+ // Store call data.
+ __ sw(t2, MemOperand(sp, FCA::kDataIndex * kPointerSize));
+ // Store isolate.
__ li(t3, Operand(ExternalReference::isolate_address(masm->isolate())));
- // Store JS function, call data, isolate ReturnValue default and ReturnValue.
- __ sw(t1, MemOperand(sp, 1 * kPointerSize));
- __ sw(t2, MemOperand(sp, 2 * kPointerSize));
- __ sw(t3, MemOperand(sp, 3 * kPointerSize));
+ __ sw(t3, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
+ // Store ReturnValue default and ReturnValue.
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
- __ sw(t1, MemOperand(sp, 4 * kPointerSize));
- __ sw(t1, MemOperand(sp, 5 * kPointerSize));
+ __ sw(t1, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
+ __ sw(t1, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
// Prepare arguments.
- __ Addu(a2, sp, Operand(5 * kPointerSize));
+ __ Move(a2, sp);
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -880,19 +877,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // a0 = v8::Arguments&
+ // a0 = FunctionCallbackInfo&
// Arguments is built at sp + 1 (sp is a reserved spot for ra).
__ Addu(a0, sp, kPointerSize);
-
- // v8::Arguments::implicit_args_
+ // FunctionCallbackInfo::implicit_args_
__ sw(a2, MemOperand(a0, 0 * kPointerSize));
- // v8::Arguments::values_
- __ Addu(t0, a2, Operand(argc * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ Addu(t0, a2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
__ sw(t0, MemOperand(a0, 1 * kPointerSize));
- // v8::Arguments::length_ = argc
+ // FunctionCallbackInfo::length_ = argc
__ li(t0, Operand(argc));
__ sw(t0, MemOperand(a0, 2 * kPointerSize));
- // v8::Arguments::is_construct_call = 0
+ // FunctionCallbackInfo::is_construct_call = 0
__ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
@@ -910,12 +906,19 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ MemOperand return_value_operand(
+ fp, (2 + FCA::kReturnValueOffset) * kPointerSize);
+
__ CallApiFunctionAndReturn(ref,
function_address,
thunk_ref,
a1,
kStackUnwindSpace,
- kFastApiCallArguments + 1);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
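The FCA::k*Index constants replace the old hard-coded sp offsets; the slot order below is a simplified mirror for orientation only (the authoritative values are the FunctionCallbackArguments constants in src/arguments.h):

    struct FunctionCallbackInfoSlots {  // one pointer-sized stack slot each
      void* holder;                // FCA::kHolderIndex, set by CheckPrototypes
      void* isolate;               // FCA::kIsolateIndex
      void* return_value_default;  // FCA::kReturnValueDefaultValueIndex
      void* return_value;          // FCA::kReturnValueOffset
      void* data;                  // FCA::kDataIndex, the call data
      void* callee;                // FCA::kCalleeIndex, the JS function
      void* context_save;          // FCA::kContextSaveIndex, restored after call
    };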
@@ -929,11 +932,12 @@ static void GenerateFastApiCall(MacroAssembler* masm,
ASSERT(optimization.is_simple_api_call());
ASSERT(!receiver.is(scratch));
+ typedef FunctionCallbackArguments FCA;
const int stack_space = kFastApiCallArguments + argc + 1;
// Assign stack space for the call arguments.
__ Subu(sp, sp, Operand(stack_space * kPointerSize));
// Write holder to stack frame.
- __ sw(receiver, MemOperand(sp, 0));
+ __ sw(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
// Write receiver to stack frame.
int index = stack_space - 1;
__ sw(receiver, MemOperand(sp, index * kPointerSize));
@@ -944,7 +948,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ sw(receiver, MemOperand(sp, index-- * kPointerSize));
}
- GenerateFastApiDirectCall(masm, optimization, argc);
+ GenerateFastApiDirectCall(masm, optimization, argc, true);
}
@@ -1058,7 +1062,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiDirectCall(
+ masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -1199,8 +1204,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
+ typedef FunctionCallbackArguments FCA;
if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
+ __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Check the maps in the prototype chain.
@@ -1258,7 +1264,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
+ __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Go to the next object in the prototype chain.
@@ -1290,9 +1296,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
if (!miss->is_unused()) {
__ Branch(success);
__ bind(miss);
@@ -1301,9 +1307,9 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
-void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
if (!miss->is_unused()) {
__ b(success);
GenerateRestoreName(masm(), miss, name);
@@ -1312,7 +1318,7 @@ void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
+Register LoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -1358,7 +1364,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
}
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
+void LoadStubCompiler::NonexistentHandlerFrontend(
Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
@@ -1378,10 +1384,10 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
}
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
@@ -1397,36 +1403,36 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ LoadObject(v0, value);
__ Ret();
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 0, NULL);
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
ASSERT(!scratch2().is(reg));
ASSERT(!scratch3().is(reg));
ASSERT(!scratch4().is(reg));
__ push(receiver());
- __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
__ li(scratch3(), callback);
__ lw(scratch3(), FieldMemOperand(scratch3(),
@@ -1444,6 +1450,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
__ sw(reg, MemOperand(sp, 1 * kPointerSize));
__ sw(name(), MemOperand(sp, 0 * kPointerSize));
+ __ Addu(scratch2(), sp, 1 * kPointerSize);
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
__ mov(a0, sp); // (first argument - a0) = Handle<Name>
@@ -1452,13 +1459,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // Create AccessorInfo instance on the stack above the exit frame with
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
__ sw(a2, MemOperand(sp, kPointerSize));
// (second argument - a1) = AccessorInfo&
__ Addu(a1, sp, kPointerSize);
- const int kStackUnwindSpace = kFastApiCallArguments + 1;
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
@@ -1475,11 +1482,12 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_ref,
a2,
kStackUnwindSpace,
- 6);
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
}
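
The STATIC_ASSERTs above pin down the reversed PropertyCallbackArguments slot order the rest of this file now assumes. Restated standalone, with the values copied from those asserts (kStackUnwindSpace is this length plus one extra slot for the property name pushed below the arguments):

    enum PropertyCallbackSlot {
      kHolderIndex = 0,                   // innermost: holder object
      kIsolateIndex = 1,
      kReturnValueDefaultValueIndex = 2,
      kReturnValueOffset = 3,
      kDataIndex = 4,
      kThisIndex = 5,                     // outermost: receiver
      kArgsLength = 6
    };
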
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
+void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
Handle<JSObject> object,
Handle<JSObject> interceptor_holder,
@@ -2558,7 +2566,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
depth, &miss);
- GenerateFastApiDirectCall(masm(), optimization, argc);
+ GenerateFastApiDirectCall(masm(), optimization, argc, false);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
@@ -3011,6 +3019,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- a0 : receiver
@@ -3022,7 +3031,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(a0);
+ __ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 3b360bb5d7..4277136b60 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -117,7 +117,7 @@ function LookupMirror(handle) {
* @returns {Mirror} the mirror reflects the undefined value
*/
function GetUndefinedMirror() {
- return MakeMirror(void 0);
+ return MakeMirror(UNDEFINED);
}
@@ -482,7 +482,7 @@ ValueMirror.prototype.value = function() {
* @extends ValueMirror
*/
function UndefinedMirror() {
- %_CallFunction(this, UNDEFINED_TYPE, void 0, ValueMirror);
+ %_CallFunction(this, UNDEFINED_TYPE, UNDEFINED, ValueMirror);
}
inherits(UndefinedMirror, ValueMirror);
@@ -957,7 +957,7 @@ FunctionMirror.prototype.scopeCount = function() {
FunctionMirror.prototype.scope = function(index) {
if (this.resolved()) {
- return new ScopeMirror(void 0, this, index);
+ return new ScopeMirror(UNDEFINED, this, index);
}
};
@@ -1670,7 +1670,7 @@ FrameMirror.prototype.scopeCount = function() {
FrameMirror.prototype.scope = function(index) {
- return new ScopeMirror(this, void 0, index);
+ return new ScopeMirror(this, UNDEFINED, index);
};
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index 9cf9e2e8a4..95d3daada2 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -43,49 +43,6 @@
using namespace v8;
-static const unsigned int kMaxCounters = 256;
-
-// A single counter in a counter collection.
-class Counter {
- public:
- static const int kMaxNameSize = 64;
- int32_t* Bind(const char* name) {
- int i;
- for (i = 0; i < kMaxNameSize - 1 && name[i]; i++) {
- name_[i] = name[i];
- }
- name_[i] = '\0';
- return &counter_;
- }
- private:
- int32_t counter_;
- uint8_t name_[kMaxNameSize];
-};
-
-
-// A set of counters and associated information. An instance of this
-// class is stored directly in the memory-mapped counters file if
-// the --save-counters options is used
-class CounterCollection {
- public:
- CounterCollection() {
- magic_number_ = 0xDEADFACE;
- max_counters_ = kMaxCounters;
- max_name_size_ = Counter::kMaxNameSize;
- counters_in_use_ = 0;
- }
- Counter* GetNextCounter() {
- if (counters_in_use_ == kMaxCounters) return NULL;
- return &counters_[counters_in_use_++];
- }
- private:
- uint32_t magic_number_;
- uint32_t max_counters_;
- uint32_t max_name_size_;
- uint32_t counters_in_use_;
- Counter counters_[kMaxCounters];
-};
-
class Compressor {
public:
@@ -310,6 +267,7 @@ void DumpException(Handle<Message> message) {
int main(int argc, char** argv) {
V8::InitializeICU();
+ i::Isolate::SetCrashIfDefaultIsolateInitialized();
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
@@ -330,7 +288,10 @@ int main(int argc, char** argv) {
exit(1);
}
#endif
- Isolate* isolate = Isolate::GetCurrent();
+ i::FLAG_logfile_per_isolate = false;
+
+ Isolate* isolate = v8::Isolate::New();
+ isolate->Enter();
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Serializer::Enable(internal_isolate);
Persistent<Context> context;
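
Condensed, the startup sequence mksnapshot now performs (a sketch of the lines above with error handling elided, not the full tool):

    v8::V8::InitializeICU();
    i::FLAG_logfile_per_isolate = false;
    v8::Isolate* isolate = v8::Isolate::New();  // explicit, not GetCurrent()
    isolate->Enter();
    // ... enable the serializer on the internal isolate and write the snapshot.
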
diff --git a/deps/v8/src/msan.h b/deps/v8/src/msan.h
new file mode 100644
index 0000000000..484c9fa397
--- /dev/null
+++ b/deps/v8/src/msan.h
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// MemorySanitizer support.
+
+#ifndef V8_MSAN_H_
+#define V8_MSAN_H_
+
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+#if __has_feature(memory_sanitizer) && !defined(MEMORY_SANITIZER)
+# define MEMORY_SANITIZER
+#endif
+
+#ifdef MEMORY_SANITIZER
+# include <sanitizer/msan_interface.h>
+// Marks a memory range as fully initialized.
+# define MSAN_MEMORY_IS_INITIALIZED(p, s) __msan_unpoison((p), (s))
+#else
+# define MSAN_MEMORY_IS_INITIALIZED(p, s)
+#endif
+
+#endif // V8_MSAN_H_
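
A hypothetical caller (not part of the patch) showing how the new macro is meant to be used: code that fills memory through channels MSan's instrumentation cannot see, such as generated machine code, reports the range explicitly. With MEMORY_SANITIZER undefined, the call compiles away entirely.

    #include <cstddef>
    #include "msan.h"

    // Hypothetical helper: generated code has just written |size| bytes at
    // |start|. MSan did not observe those stores, so mark the range
    // initialized by hand; this is a no-op in non-MSan builds.
    void NotifyGeneratedCodeWrote(void* start, size_t size) {
      MSAN_MEMORY_IS_INITIALIZED(start, size);
    }
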
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index 1035792e8b..9c7ac3889e 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -72,12 +72,12 @@ function ObservationWeakMap(map) {
ObservationWeakMap.prototype = {
get: function(key) {
key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return void 0;
+ if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
return %WeakCollectionGet(this.map_, key);
},
set: function(key, value) {
key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return void 0;
+ if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
%WeakCollectionSet(this.map_, key, value);
},
has: function(key) {
@@ -284,11 +284,6 @@ function AcceptArgIsValid(arg) {
arg.length < 0)
return false;
- var length = arg.length;
- for (var i = 0; i < length; i++) {
- if (!IS_STRING(arg[i]))
- return false;
- }
return true;
}
@@ -497,7 +492,7 @@ function ObjectNotifierPerformChange(changeType, changeFn) {
ObjectInfoAddPerformingType(objectInfo, changeType);
try {
- %_CallFunction(void 0, changeFn);
+ %_CallFunction(UNDEFINED, changeFn);
} finally {
ObjectInfoRemovePerformingType(objectInfo, changeType);
}
@@ -530,7 +525,7 @@ function CallbackDeliverPending(callback) {
%MoveArrayContents(callbackInfo, delivered);
try {
- %_CallFunction(void 0, delivered, callback);
+ %_CallFunction(UNDEFINED, delivered, callback);
} catch (ex) {}
return true;
}
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 5d9e161a7e..6ab2ddffe2 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -95,6 +95,9 @@ void HeapObject::HeapObjectVerify() {
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayVerify();
break;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ ConstantPoolArray::cast(this)->ConstantPoolArrayVerify();
+ break;
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayVerify();
break;
@@ -303,6 +306,13 @@ void ExternalDoubleArray::ExternalDoubleArrayVerify() {
}
+bool JSObject::ElementsAreSafeToExamine() {
+ return (FLAG_use_gvn && FLAG_use_allocation_folding) ||
+ reinterpret_cast<Map*>(elements()) !=
+ GetHeap()->one_pointer_filler_map();
+}
+
+
void JSObject::JSObjectVerify() {
VerifyHeapPointer(properties());
VerifyHeapPointer(elements());
@@ -330,10 +340,9 @@ void JSObject::JSObjectVerify() {
}
}
- // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
- // allocation folding is turned off.
- if (reinterpret_cast<Map*>(elements()) !=
- GetHeap()->one_pointer_filler_map()) {
+ // If a GC was caused while constructing this object, the elements
+ // pointer may point to a one pointer filler map.
+ if (ElementsAreSafeToExamine()) {
CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
(elements() == GetHeap()->empty_fixed_array())),
(elements()->map() == GetHeap()->fixed_array_map() ||
@@ -438,6 +447,11 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
}
+void ConstantPoolArray::ConstantPoolArrayVerify() {
+ CHECK(IsConstantPoolArray());
+}
+
+
void JSGeneratorObject::JSGeneratorObjectVerify() {
// In an expression like "new g()", there can be a point where a generator
// object is allocated but its fields are all undefined, as it hasn't yet been
@@ -664,16 +678,20 @@ void Code::CodeVerify() {
}
-void Code::VerifyEmbeddedMapsDependency() {
+void Code::VerifyEmbeddedObjectsDependency() {
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Map* map = Map::cast(it.rinfo()->target_object());
- if (map->CanTransition()) {
+ Object* obj = it.rinfo()->target_object();
+ if (IsWeakEmbeddedObject(kind(), obj)) {
+ if (obj->IsMap()) {
+ Map* map = Map::cast(obj);
CHECK(map->dependent_code()->Contains(
DependentCode::kWeaklyEmbeddedGroup, this));
+ } else if (obj->IsJSObject()) {
+ Object* raw_table = GetIsolate()->heap()->weak_object_to_code_table();
+ WeakHashTable* table = WeakHashTable::cast(raw_table);
+ CHECK(DependentCode::cast(table->Lookup(obj))->Contains(
+ DependentCode::kWeaklyEmbeddedGroup, this));
}
}
}
@@ -683,10 +701,9 @@ void Code::VerifyEmbeddedMapsDependency() {
void JSArray::JSArrayVerify() {
JSObjectVerify();
CHECK(length()->IsNumber() || length()->IsUndefined());
- // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
- // allocation folding is turned off.
- if (reinterpret_cast<Map*>(elements()) !=
- GetHeap()->one_pointer_filler_map()) {
+ // If a GC was caused while constructing this array, the elements
+ // pointer may point to a one pointer filler map.
+ if (ElementsAreSafeToExamine()) {
CHECK(elements()->IsUndefined() ||
elements()->IsFixedArray() ||
elements()->IsFixedDoubleArray());
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 89abe50433..deb33653f7 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -80,7 +80,7 @@ PropertyDetails PropertyDetails::AsDeleted() {
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
- ASSERT(object->Is##type()); \
+ SLOW_ASSERT(object->Is##type()); \
return reinterpret_cast<type*>(object); \
}
@@ -133,7 +133,7 @@ PropertyDetails PropertyDetails::AsDeleted() {
bool Object::IsFixedArrayBase() {
- return IsFixedArray() || IsFixedDoubleArray();
+ return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray();
}
@@ -285,14 +285,13 @@ bool Object::HasValidElements() {
MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
- Representation representation,
- PretenureFlag tenure) {
+ Representation representation) {
if (!FLAG_track_double_fields) return this;
if (!representation.IsDouble()) return this;
if (IsUninitialized()) {
- return heap->AllocateHeapNumber(0, tenure);
+ return heap->AllocateHeapNumber(0);
}
- return heap->AllocateHeapNumber(Number(), tenure);
+ return heap->AllocateHeapNumber(Number());
}
@@ -572,6 +571,7 @@ TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
+TYPE_CHECKER(ConstantPoolArray, CONSTANT_POOL_ARRAY_TYPE)
bool Object::IsJSWeakCollection() {
@@ -1028,6 +1028,12 @@ MaybeObject* Object::GetProperty(Name* key, PropertyAttributes* attributes) {
#define WRITE_UINT32_FIELD(p, offset, value) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
+#define READ_INT32_FIELD(p, offset) \
+ (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INT32_FIELD(p, offset, value) \
+ (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_INT64_FIELD(p, offset) \
(*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))
@@ -1184,7 +1190,7 @@ void HeapObject::VerifySmiField(int offset) {
Heap* HeapObject::GetHeap() {
Heap* heap =
MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
- ASSERT(heap != NULL);
+ SLOW_ASSERT(heap != NULL);
return heap;
}
@@ -1301,7 +1307,7 @@ FixedArrayBase* JSObject::elements() {
void JSObject::ValidateElements() {
-#if DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
ElementsAccessor* accessor = GetElementsAccessor();
accessor->Validate(this);
@@ -1323,6 +1329,14 @@ bool JSObject::ShouldTrackAllocationInfo() {
}
+void AllocationSite::Initialize() {
+ SetElementsKind(GetInitialFastElementsKind());
+ set_nested_site(Smi::FromInt(0));
+ set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+}
+
+
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
@@ -1535,65 +1549,6 @@ MaybeObject* JSObject::ResetElements() {
}
-MaybeObject* JSObject::AllocateStorageForMap(Map* map) {
- ASSERT(this->map()->inobject_properties() == map->inobject_properties());
- ElementsKind obj_kind = this->map()->elements_kind();
- ElementsKind map_kind = map->elements_kind();
- if (map_kind != obj_kind) {
- ElementsKind to_kind = map_kind;
- if (IsMoreGeneralElementsKindTransition(map_kind, obj_kind) ||
- IsDictionaryElementsKind(obj_kind)) {
- to_kind = obj_kind;
- }
- MaybeObject* maybe_obj =
- IsDictionaryElementsKind(to_kind) ? NormalizeElements()
- : TransitionElementsKind(to_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- MaybeObject* maybe_map = map->AsElementsKind(to_kind);
- if (!maybe_map->To(&map)) return maybe_map;
- }
- int total_size =
- map->NumberOfOwnDescriptors() + map->unused_property_fields();
- int out_of_object = total_size - map->inobject_properties();
- if (out_of_object != properties()->length()) {
- FixedArray* new_properties;
- MaybeObject* maybe_properties = properties()->CopySize(out_of_object);
- if (!maybe_properties->To(&new_properties)) return maybe_properties;
- set_properties(new_properties);
- }
- set_map(map);
- return this;
-}
-
-
-MaybeObject* JSObject::MigrateInstance() {
- // Converting any field to the most specific type will cause the
- // GeneralizeFieldRepresentation algorithm to create the most general existing
- // transition that matches the object. This achieves what is needed.
- Map* original_map = map();
- MaybeObject* maybe_result = GeneralizeFieldRepresentation(
- 0, Representation::None(), ALLOW_AS_CONSTANT);
- JSObject* result;
- if (FLAG_trace_migration && maybe_result->To(&result)) {
- PrintInstanceMigration(stdout, original_map, result->map());
- }
- return maybe_result;
-}
-
-
-MaybeObject* JSObject::TryMigrateInstance() {
- Map* new_map = map()->CurrentMapForDeprecated();
- if (new_map == NULL) return Smi::FromInt(0);
- Map* original_map = map();
- MaybeObject* maybe_result = MigrateToMap(new_map);
- JSObject* result;
- if (FLAG_trace_migration && maybe_result->To(&result)) {
- PrintInstanceMigration(stdout, original_map, result->map());
- }
- return maybe_result;
-}
-
-
Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
DisallowHeapAllocation no_gc;
if (!map->HasTransitionArray()) return Handle<String>::null();
@@ -1629,13 +1584,6 @@ Handle<Map> JSObject::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
}
-int JSObject::LastAddedFieldIndex() {
- Map* map = this->map();
- int last_added = map->LastAdded();
- return map->instance_descriptors()->GetFieldIndex(last_added);
-}
-
-
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
@@ -1719,7 +1667,9 @@ int JSObject::GetHeaderSize() {
case JS_MESSAGE_OBJECT_TYPE:
return JSMessageObject::kSize;
default:
- UNREACHABLE();
+ // TODO(jkummerow): Re-enable this. Blink currently hits this
+ // from its CustomElementConstructorBuilder.
+ // UNREACHABLE();
return 0;
}
}
@@ -1946,13 +1896,14 @@ void Object::VerifyApiCallResultType() {
FixedArrayBase* FixedArrayBase::cast(Object* object) {
- ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray());
+ ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray() ||
+ object->IsConstantPoolArray());
return reinterpret_cast<FixedArrayBase*>(object);
}
Object* FixedArray::get(int index) {
- ASSERT(index >= 0 && index < this->length());
+ SLOW_ASSERT(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
@@ -2045,6 +1996,98 @@ bool FixedDoubleArray::is_the_hole(int index) {
}
+SMI_ACCESSORS(ConstantPoolArray, first_ptr_index, kFirstPointerIndexOffset)
+SMI_ACCESSORS(ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset)
+
+
+int ConstantPoolArray::first_int64_index() {
+ return 0;
+}
+
+
+int ConstantPoolArray::count_of_int64_entries() {
+ return first_ptr_index();
+}
+
+
+int ConstantPoolArray::count_of_ptr_entries() {
+ return first_int32_index() - first_ptr_index();
+}
+
+
+int ConstantPoolArray::count_of_int32_entries() {
+ return length() - first_int32_index();
+}
+
+
+void ConstantPoolArray::SetEntryCounts(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ set_first_ptr_index(number_of_int64_entries);
+ set_first_int32_index(number_of_int64_entries + number_of_ptr_entries);
+ set_length(number_of_int64_entries + number_of_ptr_entries +
+ number_of_int32_entries);
+}
+
+
+int64_t ConstantPoolArray::get_int64_entry(int index) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= 0 && index < first_ptr_index());
+ return READ_INT64_FIELD(this, OffsetOfElementAt(index));
+}
+
+
+double ConstantPoolArray::get_int64_entry_as_double(int index) {
+ STATIC_ASSERT(kDoubleSize == kInt64Size);
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= 0 && index < first_ptr_index());
+ return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
+}
+
+
+Object* ConstantPoolArray::get_ptr_entry(int index) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_ptr_index() && index < first_int32_index());
+ return READ_FIELD(this, OffsetOfElementAt(index));
+}
+
+
+int32_t ConstantPoolArray::get_int32_entry(int index) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_int32_index() && index < length());
+ return READ_INT32_FIELD(this, OffsetOfElementAt(index));
+}
+
+
+void ConstantPoolArray::set(int index, Object* value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_ptr_index() && index < first_int32_index());
+ WRITE_FIELD(this, OffsetOfElementAt(index), value);
+ WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
+}
+
+
+void ConstantPoolArray::set(int index, int64_t value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
+}
+
+
+void ConstantPoolArray::set(int index, double value) {
+ STATIC_ASSERT(kDoubleSize == kInt64Size);
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
+}
+
+
+void ConstantPoolArray::set(int index, int32_t value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= this->first_int32_index() && index < length());
+ WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value);
+}
+
+
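
A standalone model (not V8 code) of the three-region layout these accessors encode: int64 entries first, then tagged pointers, then int32s. Only the two boundary indices are stored, since the first region always starts at zero.

    #include <cassert>

    struct ConstantPoolLayout {
      int first_ptr_index;    // doubles as count_of_int64_entries
      int first_int32_index;  // int64 entries + pointer entries
      int length;             // total number of entries

      int count_of_int64_entries() const { return first_ptr_index; }
      int count_of_ptr_entries() const {
        return first_int32_index - first_ptr_index;
      }
      int count_of_int32_entries() const { return length - first_int32_index; }
    };

    int main() {
      // What SetEntryCounts(2, 3, 4) would store.
      ConstantPoolLayout pool = {2, 2 + 3, 2 + 3 + 4};
      assert(pool.count_of_int64_entries() == 2);
      assert(pool.count_of_ptr_entries() == 3);
      assert(pool.count_of_int32_entries() == 4);
      return 0;
    }
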
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowHeapAllocation& promise) {
Heap* heap = GetHeap();
@@ -2537,6 +2580,7 @@ void SeededNumberDictionary::set_requires_slow_elements() {
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedDoubleArray)
+CAST_ACCESSOR(ConstantPoolArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
@@ -3432,6 +3476,12 @@ int HeapObject::SizeFromMap(Map* map) {
return FixedDoubleArray::SizeFor(
reinterpret_cast<FixedDoubleArray*>(this)->length());
}
+ if (instance_type == CONSTANT_POOL_ARRAY_TYPE) {
+ return ConstantPoolArray::SizeFor(
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_int64_entries(),
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_ptr_entries(),
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_int32_entries());
+ }
ASSERT(instance_type == CODE_TYPE);
return reinterpret_cast<Code*>(this)->CodeSize();
}
@@ -3808,7 +3858,8 @@ Code::StubType Code::type() {
int Code::arguments_count() {
- ASSERT(is_call_stub() || is_keyed_call_stub() || kind() == STUB);
+ ASSERT(is_call_stub() || is_keyed_call_stub() ||
+ kind() == STUB || is_handler());
return ExtractArgumentsCountFromFlags(flags());
}
@@ -3828,6 +3879,7 @@ inline void Code::set_is_crankshafted(bool value) {
int Code::major_key() {
ASSERT(kind() == STUB ||
+ kind() == HANDLER ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
kind() == COMPARE_NIL_IC ||
@@ -3842,6 +3894,7 @@ int Code::major_key() {
void Code::set_major_key(int major) {
ASSERT(kind() == STUB ||
+ kind() == HANDLER ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
kind() == COMPARE_NIL_IC ||
@@ -4077,6 +4130,11 @@ bool Code::is_inline_cache_stub() {
}
+bool Code::is_keyed_stub() {
+ return is_keyed_load_stub() || is_keyed_store_stub() || is_keyed_call_stub();
+}
+
+
bool Code::is_debug_stub() {
return ic_state() == DEBUG_STUB;
}
@@ -4495,6 +4553,9 @@ ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
+ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
+ACCESSORS(AllocationSite, dependent_code, DependentCode,
+ kDependentCodeOffset)
ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
@@ -5457,19 +5518,24 @@ ElementsKind JSObject::GetElementsKind() {
#if DEBUG
FixedArrayBase* fixed_array =
reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
- Map* map = fixed_array->map();
- ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
- (map == GetHeap()->fixed_array_map() ||
- map == GetHeap()->fixed_cow_array_map())) ||
- (IsFastDoubleElementsKind(kind) &&
- (fixed_array->IsFixedDoubleArray() ||
- fixed_array == GetHeap()->empty_fixed_array())) ||
- (kind == DICTIONARY_ELEMENTS &&
+
+ // If a GC was caused while constructing this object, the elements
+ // pointer may point to a one pointer filler map.
+ if (ElementsAreSafeToExamine()) {
+ Map* map = fixed_array->map();
+ ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
+ (map == GetHeap()->fixed_array_map() ||
+ map == GetHeap()->fixed_cow_array_map())) ||
+ (IsFastDoubleElementsKind(kind) &&
+ (fixed_array->IsFixedDoubleArray() ||
+ fixed_array == GetHeap()->empty_fixed_array())) ||
+ (kind == DICTIONARY_ELEMENTS &&
fixed_array->IsFixedArray() &&
- fixed_array->IsDictionary()) ||
- (kind > DICTIONARY_ELEMENTS));
- ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
- (elements()->IsFixedArray() && elements()->length() >= 2));
+ fixed_array->IsDictionary()) ||
+ (kind > DICTIONARY_ELEMENTS));
+ ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+ (elements()->IsFixedArray() && elements()->length() >= 2));
+ }
#endif
return kind;
}
@@ -5729,19 +5795,23 @@ Object* JSReceiver::GetConstructor() {
}
-bool JSReceiver::HasProperty(Name* name) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasPropertyWithHandler(name);
+bool JSReceiver::HasProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return GetPropertyAttribute(name) != ABSENT;
+ return object->GetPropertyAttribute(*name) != ABSENT;
}
-bool JSReceiver::HasLocalProperty(Name* name) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasPropertyWithHandler(name);
+bool JSReceiver::HasLocalProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return GetLocalPropertyAttribute(name) != ABSENT;
+ return object->GetLocalPropertyAttribute(*name) != ABSENT;
}
@@ -5783,21 +5853,23 @@ MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) {
}
-bool JSReceiver::HasElement(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasElementWithHandler(index);
+bool JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasElementWithHandler(proxy, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, true) != ABSENT;
+ return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
+ *object, index, true) != ABSENT;
}
-bool JSReceiver::HasLocalElement(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasElementWithHandler(index);
+bool JSReceiver::HasLocalElement(Handle<JSReceiver> object, uint32_t index) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasElementWithHandler(proxy, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, false) != ABSENT;
+ return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
+ *object, index, false) != ABSENT;
}
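
Seen from a call site, the handlification pattern applied here (and repeated through the rest of this file) looks like this; both signatures appear verbatim in the hunk above:

    // Before: instance method over raw pointers. An allocation inside could
    // move the receiver and leave the caller holding a stale Object*.
    //   bool found = receiver->HasProperty(*name);
    //
    // After: static method over handles, safe across GC.
    //   bool found = JSReceiver::HasProperty(receiver, name);
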
@@ -5964,6 +6036,7 @@ uint32_t NameDictionaryShape::HashForObject(Name* key, Object* other) {
MaybeObject* NameDictionaryShape::AsObject(Heap* heap, Name* key) {
+ ASSERT(key->IsUniqueName());
return key;
}
@@ -5996,6 +6069,34 @@ MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Heap* heap,
}
+template <int entrysize>
+bool WeakHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
+ return key->SameValue(other);
+}
+
+
+template <int entrysize>
+uint32_t WeakHashTableShape<entrysize>::Hash(Object* key) {
+ intptr_t hash = reinterpret_cast<intptr_t>(key);
+ return (uint32_t)(hash & 0xFFFFFFFF);
+}
+
+
+template <int entrysize>
+uint32_t WeakHashTableShape<entrysize>::HashForObject(Object* key,
+ Object* other) {
+ intptr_t hash = reinterpret_cast<intptr_t>(other);
+ return (uint32_t)(hash & 0xFFFFFFFF);
+}
+
+
+template <int entrysize>
+MaybeObject* WeakHashTableShape<entrysize>::AsObject(Heap* heap,
+ Object* key) {
+ return key;
+}
+
+
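
The new shape hashes by object identity. Restated standalone (the table stays correct because lookups also run SameValue for the final match):

    #include <cstdint>

    // Identity hash as in WeakHashTableShape: the key's address, truncated
    // to 32 bits.
    uint32_t IdentityHash(const void* key) {
      intptr_t raw = reinterpret_cast<intptr_t>(key);
      return static_cast<uint32_t>(raw & 0xFFFFFFFF);
    }
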
void Map::ClearCodeCache(Heap* heap) {
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
@@ -6065,6 +6166,12 @@ MaybeObject* FixedDoubleArray::Copy() {
}
+MaybeObject* ConstantPoolArray::Copy() {
+ if (length() == 0) return this;
+ return GetHeap()->CopyConstantPoolArray(this);
+}
+
+
void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) {
set(1 + index * 2, Smi::FromInt(id.ToInt()));
}
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 0b8fdfda03..60c1ef4c38 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -95,6 +95,9 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
break;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ ConstantPoolArray::cast(this)->ConstantPoolArrayPrint(out);
+ break;
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayPrint(out);
break;
@@ -630,6 +633,23 @@ void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
}
+void ConstantPoolArray::ConstantPoolArrayPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "ConstantPoolArray");
+ PrintF(out, " - length: %d", length());
+ for (int i = 0; i < length(); i++) {
+ if (i < first_ptr_index()) {
+ PrintF(out, "\n [%d]: double: %g", i, get_int64_entry_as_double(i));
+ } else if (i < first_int32_index()) {
+ PrintF(out, "\n [%d]: pointer: %p", i,
+ reinterpret_cast<void*>(get_ptr_entry(i)));
+ } else {
+ PrintF(out, "\n [%d]: int32: %d", i, get_int32_entry(i));
+ }
+ }
+ PrintF(out, "\n");
+}
+
+
void JSValue::JSValuePrint(FILE* out) {
HeapObject::PrintHeader(out, "ValueObject");
value()->Print(out);
@@ -1100,9 +1120,11 @@ void AllocationSite::AllocationSitePrint(FILE* out) {
HeapObject::PrintHeader(out, "AllocationSite");
PrintF(out, " - weak_next: ");
weak_next()->ShortPrint(out);
- PrintF(out, "\n");
-
- PrintF(out, " - transition_info: ");
+ PrintF(out, "\n - dependent code: ");
+ dependent_code()->ShortPrint(out);
+ PrintF(out, "\n - nested site: ");
+ nested_site()->ShortPrint(out);
+ PrintF(out, "\n - transition_info: ");
if (transition_info()->IsCell()) {
Cell* cell = Cell::cast(transition_info());
Object* cell_contents = cell->value();
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index 46cc9d7989..93b7cb96ad 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -185,6 +185,8 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
+ table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray);
+
table_.Register(kVisitNativeContext, &VisitNativeContext);
table_.Register(kVisitAllocationSite,
@@ -261,10 +263,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
ASSERT(!rinfo->target_object()->IsConsString());
HeapObject* object = HeapObject::cast(rinfo->target_object());
- if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
- rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
- !object->IsMap() || !Map::cast(object)->CanTransition()) {
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), object)) {
StaticVisitor::MarkObject(heap, object);
}
}
@@ -452,6 +452,22 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
+ int first_ptr_offset = constant_pool->OffsetOfElementAt(
+ constant_pool->first_ptr_index());
+ int last_ptr_offset = constant_pool->OffsetOfElementAt(
+ constant_pool->first_ptr_index() + constant_pool->count_of_ptr_entries());
+ StaticVisitor::VisitPointers(
+ heap,
+ HeapObject::RawField(object, first_ptr_offset),
+ HeapObject::RawField(object, last_ptr_offset));
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index cd46013398..5ced2cf7a3 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -82,6 +82,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case FIXED_DOUBLE_ARRAY_TYPE:
return kVisitFixedDoubleArray;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ return kVisitConstantPoolArray;
+
case ODDBALL_TYPE:
return kVisitOddball;
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index 21757377a4..60e6f67471 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -54,6 +54,7 @@ class StaticVisitorBase : public AllStatic {
V(FreeSpace) \
V(FixedArray) \
V(FixedDoubleArray) \
+ V(ConstantPoolArray) \
V(NativeContext) \
V(AllocationSite) \
V(DataObject2) \
@@ -416,6 +417,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitMap(Map* map, HeapObject* object));
INLINE(static void VisitCode(Map* map, HeapObject* object));
INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
+ INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object));
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 35646b8be5..f7c89175da 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "accessors.h"
+#include "allocation-site-scopes.h"
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
@@ -142,6 +143,20 @@ void Object::Lookup(Name* name, LookupResult* result) {
}
+Handle<Object> Object::GetPropertyWithReceiver(
+ Handle<Object> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ PropertyAttributes* attributes) {
+ LookupResult lookup(name->GetIsolate());
+ object->Lookup(*name, &lookup);
+ Handle<Object> result =
+ GetProperty(object, receiver, &lookup, name, attributes);
+ ASSERT(*attributes <= ABSENT);
+ return result;
+}
+
+
MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
Name* name,
PropertyAttributes* attributes) {
@@ -328,9 +343,18 @@ static MaybeObject* GetDeclaredAccessorProperty(Object* receiver,
}
-MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
- Object* structure,
- Name* name) {
+Handle<FixedArray> JSObject::EnsureWritableFastElements(
+ Handle<JSObject> object) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->EnsureWritableFastElements(),
+ FixedArray);
+}
+
+
+Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Object> structure,
+ Handle<Name> name) {
Isolate* isolate = name->GetIsolate();
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -338,66 +362,71 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->foreign_address());
- MaybeObject* value = (callback->getter)(isolate, receiver, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return value;
+ Handle<Foreign>::cast(structure)->foreign_address());
+ CALL_HEAP_FUNCTION(isolate,
+ (callback->getter)(isolate, *receiver, callback->data),
+ Object);
}
// api style callbacks.
if (structure->IsAccessorInfo()) {
- if (!AccessorInfo::cast(structure)->IsCompatibleReceiver(receiver)) {
- Handle<Object> name_handle(name, isolate);
- Handle<Object> receiver_handle(receiver, isolate);
- Handle<Object> args[2] = { name_handle, receiver_handle };
+ Handle<AccessorInfo> accessor_info = Handle<AccessorInfo>::cast(structure);
+ if (!accessor_info->IsCompatibleReceiver(*receiver)) {
+ Handle<Object> args[2] = { name, receiver };
Handle<Object> error =
isolate->factory()->NewTypeError("incompatible_method_receiver",
HandleVector(args,
ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>::null();
}
// TODO(rossberg): Handling symbols in the API requires changing the API,
// so we do not support it for now.
- if (name->IsSymbol()) return isolate->heap()->undefined_value();
+ if (name->IsSymbol()) return isolate->factory()->undefined_value();
if (structure->IsDeclaredAccessorInfo()) {
- return GetDeclaredAccessorProperty(receiver,
- DeclaredAccessorInfo::cast(structure),
- isolate);
+ CALL_HEAP_FUNCTION(
+ isolate,
+ GetDeclaredAccessorProperty(*receiver,
+ DeclaredAccessorInfo::cast(*structure),
+ isolate),
+ Object);
}
- ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
- Object* fun_obj = data->getter();
+
+ Handle<ExecutableAccessorInfo> data =
+ Handle<ExecutableAccessorInfo>::cast(structure);
v8::AccessorGetterCallback call_fun =
- v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
- if (call_fun == NULL) return isolate->heap()->undefined_value();
+ v8::ToCData<v8::AccessorGetterCallback>(data->getter());
+ if (call_fun == NULL) return isolate->factory()->undefined_value();
+
HandleScope scope(isolate);
- JSObject* self = JSObject::cast(receiver);
- Handle<String> key(String::cast(name));
- LOG(isolate, ApiNamedPropertyAccess("load", self, name));
- PropertyCallbackArguments args(isolate, data->data(), self, this);
+ Handle<JSObject> self = Handle<JSObject>::cast(receiver);
+ Handle<String> key = Handle<String>::cast(name);
+ LOG(isolate, ApiNamedPropertyAccess("load", *self, *name));
+ PropertyCallbackArguments args(isolate, data->data(), *self, *object);
v8::Handle<v8::Value> result =
args.Call(call_fun, v8::Utils::ToLocal(key));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.IsEmpty()) {
- return isolate->heap()->undefined_value();
+ return isolate->factory()->undefined_value();
}
- Object* return_value = *v8::Utils::OpenHandle(*result);
+ Handle<Object> return_value = v8::Utils::OpenHandle(*result);
return_value->VerifyApiCallResultType();
- return return_value;
+ return scope.CloseAndEscape(return_value);
}
// __defineGetter__ callback
- if (structure->IsAccessorPair()) {
- Object* getter = AccessorPair::cast(structure)->getter();
- if (getter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
- }
- // Getter is not a function.
- return isolate->heap()->undefined_value();
+ Handle<Object> getter(Handle<AccessorPair>::cast(structure)->getter(),
+ isolate);
+ if (getter->IsSpecFunction()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ CALL_HEAP_FUNCTION(
+ isolate,
+ object->GetPropertyWithDefinedGetter(*receiver,
+ JSReceiver::cast(*getter)),
+ Object);
}
-
- UNREACHABLE();
- return NULL;
+ // Getter is not a function.
+ return isolate->factory()->undefined_value();
}
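
Much of this hunk routes raw-pointer code through CALL_HEAP_FUNCTION. A heavily simplified model of that macro (the real definition, in handles.h, also runs a GC and retries when the raw call reports allocation failure):

    // Sketch only, not the real macro:
    //   #define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)     \
    //     do {                                                       \
    //       MaybeObject* __maybe__ = (FUNCTION_CALL);                \
    //       Object* __object__ = NULL;                               \
    //       if (!__maybe__->ToObject(&__object__)) {                 \
    //         /* trigger a GC, then retry FUNCTION_CALL */           \
    //       }                                                        \
    //       return Handle<TYPE>(TYPE::cast(__object__), ISOLATE);    \
    //     } while (false)
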
@@ -455,18 +484,15 @@ Handle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
StrictModeFlag strict_mode) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- CALL_HEAP_FUNCTION(isolate,
- proxy->SetPropertyWithHandler(
- *receiver, *name, *value, NONE, strict_mode),
- Object);
+ return SetPropertyWithHandler(
+ proxy, receiver, name, value, NONE, strict_mode);
}
-bool JSProxy::HasElementWithHandler(uint32_t index) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return HasPropertyWithHandler(name);
+bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ return HasPropertyWithHandler(proxy, name);
}
@@ -496,56 +522,51 @@ MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
// Only deal with CALLBACKS and INTERCEPTOR
-MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
- Object* receiver,
+Handle<Object> JSObject::GetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
LookupResult* result,
- Name* name,
+ Handle<Name> name,
PropertyAttributes* attributes) {
+ Isolate* isolate = name->GetIsolate();
if (result->IsProperty()) {
switch (result->type()) {
case CALLBACKS: {
// Only allow API accessors.
- Object* obj = result->GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
- if (info->all_can_read()) {
- *attributes = result->GetAttributes();
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
- }
- } else if (obj->IsAccessorPair()) {
- AccessorPair* pair = AccessorPair::cast(obj);
- if (pair->all_can_read()) {
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
- }
+ Handle<Object> callback_obj(result->GetCallbackObject(), isolate);
+ if (callback_obj->IsAccessorInfo()) {
+ if (!AccessorInfo::cast(*callback_obj)->all_can_read()) break;
+ *attributes = result->GetAttributes();
+ // Fall through to GetPropertyWithCallback.
+ } else if (callback_obj->IsAccessorPair()) {
+ if (!AccessorPair::cast(*callback_obj)->all_can_read()) break;
+ // Fall through to GetPropertyWithCallback.
+ } else {
+ break;
}
- break;
+ Handle<JSObject> holder(result->holder(), isolate);
+ return GetPropertyWithCallback(holder, receiver, callback_obj, name);
}
case NORMAL:
case FIELD:
case CONSTANT: {
// Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
+ LookupResult r(isolate);
+ result->holder()->LookupRealNamedPropertyInPrototypes(*name, &r);
if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(receiver,
- &r,
- name,
- attributes);
+ return GetPropertyWithFailedAccessCheck(
+ object, receiver, &r, name, attributes);
}
break;
}
case INTERCEPTOR: {
// If the object has an interceptor, try real named properties.
// No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedProperty(name, &r);
+ LookupResult r(isolate);
+ result->holder()->LookupRealNamedProperty(*name, &r);
if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(receiver,
- &r,
- name,
- attributes);
+ return GetPropertyWithFailedAccessCheck(
+ object, receiver, &r, name, attributes);
}
break;
}
@@ -556,11 +577,9 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
// No accessible property found.
*attributes = ABSENT;
- Heap* heap = name->GetHeap();
- Isolate* isolate = heap->isolate();
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_GET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return heap->undefined_value();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_GET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
@@ -643,67 +662,63 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) {
}
-Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetNormalizedProperty(result, *value),
- Object);
-}
-
-
-MaybeObject* JSObject::SetNormalizedProperty(LookupResult* result,
- Object* value) {
- ASSERT(!HasFastProperties());
- if (IsGlobalObject()) {
- PropertyCell* cell = PropertyCell::cast(
- property_dictionary()->ValueAt(result->GetDictionaryEntry()));
- MaybeObject* maybe_type = cell->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+void JSObject::SetNormalizedProperty(Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Object> value) {
+ ASSERT(!object->HasFastProperties());
+ NameDictionary* property_dictionary = object->property_dictionary();
+ if (object->IsGlobalObject()) {
+ Handle<PropertyCell> cell(PropertyCell::cast(
+ property_dictionary->ValueAt(result->GetDictionaryEntry())));
+ PropertyCell::SetValueInferType(cell, value);
} else {
- property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
+ property_dictionary->ValueAtPut(result->GetDictionaryEntry(), *value);
}
- return value;
}
-Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetNormalizedProperty(*key, *value, details),
- Object);
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dict->GetIsolate(),
+ dict->Add(*name, *value, details),
+ NameDictionary);
}
-MaybeObject* JSObject::SetNormalizedProperty(Name* name,
- Object* value,
- PropertyDetails details) {
- ASSERT(!HasFastProperties());
- int entry = property_dictionary()->FindEntry(name);
+void JSObject::SetNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ ASSERT(!object->HasFastProperties());
+ Handle<NameDictionary> property_dictionary(object->property_dictionary());
+
+ if (!name->IsUniqueName()) {
+ name = object->GetIsolate()->factory()->InternalizedStringFromString(
+ Handle<String>::cast(name));
+ }
+
+ int entry = property_dictionary->FindEntry(*name);
if (entry == NameDictionary::kNotFound) {
- Object* store_value = value;
- if (IsGlobalObject()) {
- Heap* heap = name->GetHeap();
- MaybeObject* maybe_store_value = heap->AllocatePropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
- }
- Object* dict;
- { MaybeObject* maybe_dict =
- property_dictionary()->Add(name, store_value, details);
- if (!maybe_dict->ToObject(&dict)) return maybe_dict;
+ Handle<Object> store_value = value;
+ if (object->IsGlobalObject()) {
+ store_value = object->GetIsolate()->factory()->NewPropertyCell(value);
}
- set_properties(NameDictionary::cast(dict));
- return value;
+
+ property_dictionary =
+ NameDictionaryAdd(property_dictionary, name, store_value, details);
+ object->set_properties(*property_dictionary);
+ return;
}
- PropertyDetails original_details = property_dictionary()->DetailsAt(entry);
+ PropertyDetails original_details = property_dictionary->DetailsAt(entry);
int enumeration_index;
// Preserve the enumeration index unless the property was deleted.
if (original_details.IsDeleted()) {
- enumeration_index = property_dictionary()->NextEnumerationIndex();
- property_dictionary()->SetNextEnumerationIndex(enumeration_index + 1);
+ enumeration_index = property_dictionary->NextEnumerationIndex();
+ property_dictionary->SetNextEnumerationIndex(enumeration_index + 1);
} else {
enumeration_index = original_details.dictionary_index();
ASSERT(enumeration_index > 0);
@@ -712,17 +727,15 @@ MaybeObject* JSObject::SetNormalizedProperty(Name* name,
details = PropertyDetails(
details.attributes(), details.type(), enumeration_index);
- if (IsGlobalObject()) {
- PropertyCell* cell =
- PropertyCell::cast(property_dictionary()->ValueAt(entry));
- MaybeObject* maybe_type = cell->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+ if (object->IsGlobalObject()) {
+ Handle<PropertyCell> cell(
+ PropertyCell::cast(property_dictionary->ValueAt(entry)));
+ PropertyCell::SetValueInferType(cell, value);
// Please note we have to update the property details.
- property_dictionary()->DetailsAtPut(entry, details);
+ property_dictionary->DetailsAtPut(entry, details);
} else {
- property_dictionary()->SetEntry(entry, name, value, details);
+ property_dictionary->SetEntry(entry, *name, *value, details);
}
- return value;
}
@@ -733,12 +746,6 @@ Handle<NameDictionary> NameDictionaryShrink(Handle<NameDictionary> dict,
}
-static void CellSetValueInferType(Handle<PropertyCell> cell,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION_VOID(cell->GetIsolate(), cell->SetValueInferType(*value));
-}
-
-
Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
Handle<Name> name,
DeleteMode mode) {
@@ -761,7 +768,8 @@ Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
object->set_map(*new_map);
}
Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
- CellSetValueInferType(cell, isolate->factory()->the_hole_value());
+ Handle<Object> value = isolate->factory()->the_hole_value();
+ PropertyCell::SetValueInferType(cell, value);
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
Handle<Object> deleted(dictionary->DeleteProperty(entry, mode), isolate);
@@ -817,17 +825,24 @@ MaybeObject* Object::GetPropertyOrFail(Handle<Object> object,
}
+// TODO(yangguo): handlify this and get rid of.
MaybeObject* Object::GetProperty(Object* receiver,
LookupResult* result,
Name* name,
PropertyAttributes* attributes) {
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
-
Isolate* isolate = name->GetIsolate();
Heap* heap = isolate->heap();
+#ifdef DEBUG
+ // TODO(mstarzinger): Only because of the AssertNoContextChange, drop as soon
+ // as this method has been fully handlified.
+ HandleScope scope(isolate);
+#endif
+
+ // Make sure that the top context does not change when doing
+ // callbacks or interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
// Traverse the prototype chain from the current object (this) to
// the holder and check for access rights. This avoids traversing the
// objects more than once in case of interceptors, because the
@@ -849,11 +864,16 @@ MaybeObject* Object::GetProperty(Object* receiver,
// property from the current object, we still check that we have
// access to it.
JSObject* checked = JSObject::cast(current);
- if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
- return checked->GetPropertyWithFailedAccessCheck(receiver,
- result,
- name,
- attributes);
+ if (!isolate->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithFailedAccessCheck(
+ handle(checked, isolate),
+ handle(receiver, isolate),
+ result,
+ handle(name, isolate),
+ attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
+ return *value;
}
}
// Stop traversing the chain once we reach the last object in the
@@ -884,14 +904,28 @@ MaybeObject* Object::GetProperty(Object* receiver,
}
case CONSTANT:
return result->GetConstant();
- case CALLBACKS:
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
+ case CALLBACKS: {
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithCallback(
+ handle(result->holder(), isolate),
+ handle(receiver, isolate),
+ handle(result->GetCallbackObject(), isolate),
+ handle(name, isolate));
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
+ return *value;
+ }
case HANDLER:
return result->proxy()->GetPropertyWithHandler(receiver, name);
- case INTERCEPTOR:
- return result->holder()->GetPropertyWithInterceptor(
- receiver, name, attributes);
+ case INTERCEPTOR: {
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithInterceptor(
+ handle(result->holder(), isolate),
+ handle(receiver, isolate),
+ handle(name, isolate),
+ attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
+ return *value;
+ }
case TRANSITION:
case NONEXISTENT:
UNREACHABLE();
@@ -1026,8 +1060,11 @@ bool Object::SameValue(Object* other) {
if (IsNumber() && other->IsNumber()) {
double this_value = Number();
double other_value = other->Number();
- return (this_value == other_value) ||
- (std::isnan(this_value) && std::isnan(other_value));
+ bool equal = this_value == other_value;
+ // SameValue(NaN, NaN) is true.
+ if (!equal) return std::isnan(this_value) && std::isnan(other_value);
+ // SameValue(0.0, -0.0) is false.
+ return (this_value != 0) || ((1 / this_value) == (1 / other_value));
}
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
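
The rewritten comparison fixes Object::SameValue for signed zeros: the old code handled SameValue(NaN, NaN) but wrongly reported 0.0 and -0.0 as the same value. The reciprocal trick works because operator== cannot tell the zeros apart, while 1/0.0 is +Infinity and 1/-0.0 is -Infinity under IEEE-754. A standalone sketch of the corrected predicate (not V8 code):

    #include <cassert>
    #include <cmath>

    // Mirrors the double case of Object::SameValue above.
    static bool SameValueDouble(double a, double b) {
      bool equal = a == b;
      // SameValue(NaN, NaN) is true, although NaN != NaN under operator==.
      if (!equal) return std::isnan(a) && std::isnan(b);
      // SameValue(0.0, -0.0) is false: 1/0.0 == +inf, 1/-0.0 == -inf.
      return (a != 0) || ((1 / a) == (1 / b));
    }

    int main() {
      assert(SameValueDouble(NAN, NAN));
      assert(!SameValueDouble(0.0, -0.0));
      assert(SameValueDouble(-0.0, -0.0));
      assert(SameValueDouble(1.5, 1.5));
      return 0;
    }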
@@ -1167,7 +1204,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
ASSERT(!this->IsExternalString());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -1224,7 +1261,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -1709,6 +1746,9 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case FIXED_ARRAY_TYPE:
FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
break;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ reinterpret_cast<ConstantPoolArray*>(this)->ConstantPoolIterateBody(v);
+ break;
case FIXED_DOUBLE_ARRAY_TYPE:
break;
case JS_OBJECT_TYPE:
@@ -1871,211 +1911,248 @@ String* JSReceiver::constructor_name() {
}
-MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
- Name* name,
- Object* value,
- int field_index,
- Representation representation) {
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Object> NewStorageFor(Isolate* isolate,
+ Handle<Object> object,
+ Representation representation) {
+ Heap* heap = isolate->heap();
+ CALL_HEAP_FUNCTION(isolate,
+ object->AllocateNewStorageFor(heap, representation),
+ Object);
+}
+
+
+void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
+ Handle<Map> new_map,
+ Handle<Name> name,
+ Handle<Object> value,
+ int field_index,
+ Representation representation) {
+ Isolate* isolate = object->GetIsolate();
+
// This method is used to transition to a field. If we are transitioning to a
// double field, allocate new storage.
- Object* storage;
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(GetHeap(), representation);
- if (!maybe_storage->To(&storage)) return maybe_storage;
+ Handle<Object> storage = NewStorageFor(isolate, value, representation);
- if (map()->unused_property_fields() == 0) {
+ if (object->map()->unused_property_fields() == 0) {
int new_unused = new_map->unused_property_fields();
- FixedArray* values;
- MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + new_unused + 1);
- if (!maybe_values->To(&values)) return maybe_values;
+ Handle<FixedArray> properties(object->properties());
+ Handle<FixedArray> values = isolate->factory()->CopySizeFixedArray(
+ properties, properties->length() + new_unused + 1);
+ object->set_properties(*values);
+ }
- set_properties(values);
+ object->set_map(*new_map);
+ object->FastPropertyAtPut(field_index, *storage);
+}
+
+
+static MaybeObject* CopyAddFieldDescriptor(Map* map,
+ Name* name,
+ int index,
+ PropertyAttributes attributes,
+ Representation representation,
+ TransitionFlag flag) {
+ Map* new_map;
+ FieldDescriptor new_field_desc(name, index, attributes, representation);
+ MaybeObject* maybe_map = map->CopyAddDescriptor(&new_field_desc, flag);
+ if (!maybe_map->To(&new_map)) return maybe_map;
+ int unused_property_fields = map->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += JSObject::kFieldsAdded;
}
+ new_map->set_unused_property_fields(unused_property_fields);
+ return new_map;
+}
- set_map(new_map);
- FastPropertyAtPut(field_index, storage);
- return value;
+static Handle<Map> CopyAddFieldDescriptor(Handle<Map> map,
+ Handle<Name> name,
+ int index,
+ PropertyAttributes attributes,
+ Representation representation,
+ TransitionFlag flag) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ CopyAddFieldDescriptor(
+ *map, *name, index, attributes, representation, flag),
+ Map);
}
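
NewStorageFor and the two CopyAddFieldDescriptor overloads above are marked as temporary wrappers: CALL_HEAP_FUNCTION adapts an allocating MaybeObject*-returning function to a handle-returning one by retrying after a garbage collection when the allocation reports failure. A rough standalone model of that retry idiom, with invented names (MaybeResult, CallHeapFunctionSim) standing in for the real macro machinery:

    #include <cassert>

    struct MaybeResult {
      void* ptr;
      bool retry_after_gc;  // the allocator ran out of space
    };

    static int g_attempts = 0;
    static int g_storage = 7;

    MaybeResult AllocateSomething() {
      // Fail the first time to simulate a full heap.
      if (g_attempts++ == 0) return {nullptr, true};
      return {&g_storage, false};
    }

    void CollectGarbage() { /* pretend to free space */ }

    // Retry once after GC before giving up, like CALL_HEAP_FUNCTION.
    template <typename T>
    T* CallHeapFunctionSim(MaybeResult (*fn)()) {
      for (int i = 0; i < 2; ++i) {
        MaybeResult result = fn();
        if (!result.retry_after_gc) return static_cast<T*>(result.ptr);
        CollectGarbage();
      }
      return nullptr;  // still failing: out of memory
    }

    int main() {
      int* value = CallHeapFunctionSim<int>(AllocateSomething);
      assert(value != nullptr && *value == 7);
      return 0;
    }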
-MaybeObject* JSObject::AddFastProperty(Name* name,
- Object* value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode,
- ValueType value_type,
- TransitionFlag flag) {
- ASSERT(!IsJSGlobalProxy());
+void JSObject::AddFastProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode,
+ ValueType value_type,
+ TransitionFlag flag) {
+ ASSERT(!object->IsJSGlobalProxy());
ASSERT(DescriptorArray::kNotFound ==
- map()->instance_descriptors()->Search(
- name, map()->NumberOfOwnDescriptors()));
+ object->map()->instance_descriptors()->Search(
+ *name, object->map()->NumberOfOwnDescriptors()));
// Normalize the object if the name is an actual name (not the
  // hidden string) and is not a real identifier.
// Normalize the object if it will have too many fast properties.
- Isolate* isolate = GetHeap()->isolate();
- if (!name->IsCacheable(isolate) || TooManyFastProperties(store_mode)) {
- MaybeObject* maybe_failure =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_failure->IsFailure()) return maybe_failure;
- return AddSlowProperty(name, value, attributes);
+ Isolate* isolate = object->GetIsolate();
+ if (!name->IsCacheable(isolate) ||
+ object->TooManyFastProperties(store_mode)) {
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+ AddSlowProperty(object, name, value, attributes);
+ return;
}
// Compute the new index for new field.
- int index = map()->NextFreePropertyIndex();
+ int index = object->map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- if (IsJSContextExtensionObject()) value_type = FORCE_TAGGED;
+ if (object->IsJSContextExtensionObject()) value_type = FORCE_TAGGED;
Representation representation = value->OptimalRepresentation(value_type);
+ Handle<Map> new_map = CopyAddFieldDescriptor(
+ handle(object->map()), name, index, attributes, representation, flag);
- FieldDescriptor new_field(name, index, attributes, representation);
-
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ AddFastPropertyUsingMap(object, new_map, name, value, index, representation);
+}
- int unused_property_fields = map()->unused_property_fields() - 1;
- if (unused_property_fields < 0) {
- unused_property_fields += kFieldsAdded;
- }
- new_map->set_unused_property_fields(unused_property_fields);
- return AddFastPropertyUsingMap(new_map, name, value, index, representation);
+static MaybeObject* CopyAddConstantDescriptor(Map* map,
+ Name* name,
+ Object* value,
+ PropertyAttributes attributes,
+ TransitionFlag flag) {
+ ConstantDescriptor new_constant_desc(name, value, attributes);
+ return map->CopyAddDescriptor(&new_constant_desc, flag);
}
-MaybeObject* JSObject::AddConstantProperty(
- Name* name,
- Object* constant,
- PropertyAttributes attributes,
- TransitionFlag initial_flag) {
- // Allocate new instance descriptors with (name, constant) added
- ConstantDescriptor d(name, constant, attributes);
+static Handle<Map> CopyAddConstantDescriptor(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ TransitionFlag flag) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ CopyAddConstantDescriptor(
+ *map, *name, *value, attributes, flag),
+ Map);
+}
+
+void JSObject::AddConstantProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> constant,
+ PropertyAttributes attributes,
+ TransitionFlag initial_flag) {
TransitionFlag flag =
// Do not add transitions to global objects.
- (IsGlobalObject() ||
+ (object->IsGlobalObject() ||
// Don't add transitions to special properties with non-trivial
// attributes.
attributes != NONE)
? OMIT_TRANSITION
: initial_flag;
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&d, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ // Allocate new instance descriptors with (name, constant) added.
+ Handle<Map> new_map = CopyAddConstantDescriptor(
+ handle(object->map()), name, constant, attributes, flag);
- set_map(new_map);
- return constant;
+ object->set_map(*new_map);
}
-// Add property in slow mode
-MaybeObject* JSObject::AddSlowProperty(Name* name,
- Object* value,
- PropertyAttributes attributes) {
- ASSERT(!HasFastProperties());
- NameDictionary* dict = property_dictionary();
- Object* store_value = value;
- if (IsGlobalObject()) {
+void JSObject::AddSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ ASSERT(!object->HasFastProperties());
+ Isolate* isolate = object->GetIsolate();
+ Handle<NameDictionary> dict(object->property_dictionary());
+ if (object->IsGlobalObject()) {
    // In case name is an orphaned property, reuse the cell.
- int entry = dict->FindEntry(name);
+ int entry = dict->FindEntry(*name);
if (entry != NameDictionary::kNotFound) {
- store_value = dict->ValueAt(entry);
- MaybeObject* maybe_type =
- PropertyCell::cast(store_value)->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+ Handle<PropertyCell> cell(PropertyCell::cast(dict->ValueAt(entry)));
+ PropertyCell::SetValueInferType(cell, value);
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = dict->NextEnumerationIndex();
PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
dict->SetNextEnumerationIndex(index + 1);
- dict->SetEntry(entry, name, store_value, details);
- return value;
- }
- Heap* heap = GetHeap();
- { MaybeObject* maybe_store_value =
- heap->AllocatePropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
+ dict->SetEntry(entry, *name, *cell, details);
+ return;
}
- MaybeObject* maybe_type =
- PropertyCell::cast(store_value)->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+ Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(value);
+ PropertyCell::SetValueInferType(cell, value);
+ value = cell;
}
PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
- Object* result;
- { MaybeObject* maybe_result = dict->Add(name, store_value, details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- if (dict != result) set_properties(NameDictionary::cast(result));
- return value;
+ Handle<NameDictionary> result = NameDictionaryAdd(dict, name, value, details);
+ if (*dict != *result) object->set_properties(*result);
}
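
On global objects the dictionary slots hold PropertyCells rather than plain values, and deleting a property only fills its cell with the hole. AddSlowProperty therefore revives such an orphaned cell instead of allocating a fresh one, so code holding the old cell keeps observing the property. A toy version of the reuse (invented types; shared_ptr identity stands in for cell identity):

    #include <cassert>
    #include <map>
    #include <memory>
    #include <optional>
    #include <string>

    // Stand-in for V8's PropertyCell: a stable box other code may point at.
    struct Cell {
      std::optional<int> value;  // empty models "the hole" (deleted)
    };

    std::map<std::string, std::shared_ptr<Cell>> g_dictionary;

    // Reuse an orphaned cell if the name was seen before; allocate otherwise.
    std::shared_ptr<Cell> AddSlowProperty(const std::string& name, int value) {
      auto it = g_dictionary.find(name);
      if (it != g_dictionary.end()) {
        it->second->value = value;  // revive the existing cell in place
        return it->second;
      }
      auto cell = std::make_shared<Cell>(Cell{value});
      g_dictionary[name] = cell;
      return cell;
    }

    int main() {
      auto cell = AddSlowProperty("x", 1);
      cell->value.reset();  // "delete" x: hole the cell, keep it in the table
      auto cell2 = AddSlowProperty("x", 2);
      assert(cell == cell2);  // the same cell was reused
      assert(cell->value == 2);
      return 0;
    }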
-MaybeObject* JSObject::AddProperty(Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode,
- ExtensibilityCheck extensibility_check,
- ValueType value_type,
- StoreMode mode,
- TransitionFlag transition_flag) {
- ASSERT(!IsJSGlobalProxy());
- Map* map_of_this = map();
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
- MaybeObject* result;
+Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ JSReceiver::StoreFromKeyed store_mode,
+ ExtensibilityCheck extensibility_check,
+ ValueType value_type,
+ StoreMode mode,
+ TransitionFlag transition_flag) {
+ ASSERT(!object->IsJSGlobalProxy());
+ Isolate* isolate = object->GetIsolate();
+
+ if (!name->IsUniqueName()) {
+ name = isolate->factory()->InternalizedStringFromString(
+ Handle<String>::cast(name));
+ }
+
if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
- !map_of_this->is_extensible()) {
+ !object->map()->is_extensible()) {
if (strict_mode == kNonStrictMode) {
return value;
} else {
- Handle<Object> args[1] = {Handle<Name>(name)};
- return isolate->Throw(
- *isolate->factory()->NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
+ Handle<Object> args[1] = { name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "object_not_extensible", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
- if (HasFastProperties()) {
+ if (object->HasFastProperties()) {
// Ensure the descriptor array does not get too big.
- if (map_of_this->NumberOfOwnDescriptors() <
+ if (object->map()->NumberOfOwnDescriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
// TODO(verwaest): Support other constants.
// if (mode == ALLOW_AS_CONSTANT &&
// !value->IsTheHole() &&
// !value->IsConsString()) {
if (value->IsJSFunction()) {
- result = AddConstantProperty(name, value, attributes, transition_flag);
+ AddConstantProperty(object, name, value, attributes, transition_flag);
} else {
- result = AddFastProperty(
- name, value, attributes, store_mode, value_type, transition_flag);
+ AddFastProperty(object, name, value, attributes, store_mode,
+ value_type, transition_flag);
}
} else {
// Normalize the object to prevent very large instance descriptors.
// This eliminates unwanted N^2 allocation and lookup behavior.
- Object* obj;
- MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe->To(&obj)) return maybe;
- result = AddSlowProperty(name, value, attributes);
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+ AddSlowProperty(object, name, value, attributes);
}
} else {
- result = AddSlowProperty(name, value, attributes);
+ AddSlowProperty(object, name, value, attributes);
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (FLAG_harmony_observation && map()->is_observed()) {
- EnqueueChangeRecord(handle(this, isolate),
- "new",
- handle(name, isolate),
- handle(heap->the_hole_value(), isolate));
+ if (FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string()) {
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ EnqueueChangeRecord(object, "new", name, old_value);
}
- return *hresult;
+ return value;
}
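
AddProperty centralizes the ES5 extensibility check: storing a new property on a non-extensible object is silently ignored in sloppy mode but throws object_not_extensible in strict mode. A small model of that branch with invented toy types:

    #include <cassert>
    #include <map>
    #include <stdexcept>
    #include <string>

    struct ToyObject {
      bool extensible = true;
      std::map<std::string, int> properties;
    };

    enum StrictModeFlag { kNonStrictMode, kStrictMode };

    // Mirrors the extensibility branch of AddProperty: sloppy-mode stores to
    // a non-extensible object are no-ops, strict-mode stores throw.
    void AddProperty(ToyObject& obj, const std::string& name, int value,
                     StrictModeFlag strict_mode) {
      if (!obj.extensible) {
        if (strict_mode == kNonStrictMode) return;  // silently ignored
        throw std::runtime_error("object_not_extensible: " + name);
      }
      obj.properties[name] = value;
    }

    int main() {
      ToyObject obj;
      obj.extensible = false;
      AddProperty(obj, "x", 1, kNonStrictMode);  // ignored
      assert(obj.properties.empty());
      bool threw = false;
      try {
        AddProperty(obj, "x", 1, kStrictMode);
      } catch (const std::runtime_error&) {
        threw = true;
      }
      assert(threw);
      return 0;
    }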
@@ -2115,37 +2192,39 @@ void JSObject::DeliverChangeRecords(Isolate* isolate) {
}
-MaybeObject* JSObject::SetPropertyPostInterceptor(
- Name* name,
- Object* value,
+Handle<Object> JSObject::SetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreMode mode) {
+ StrictModeFlag strict_mode) {
// Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (!result.IsFound()) map()->LookupTransition(this, name, &result);
+ LookupResult result(object->GetIsolate());
+ object->LocalLookupRealNamedProperty(*name, &result);
+ if (!result.IsFound()) {
+ object->map()->LookupTransition(*object, *name, &result);
+ }
if (result.IsFound()) {
// An existing property or a map transition was found. Use set property to
// handle all these cases.
- return SetProperty(&result, name, value, attributes, strict_mode);
+ return SetPropertyForResult(object, &result, name, value, attributes,
+ strict_mode, MAY_BE_STORE_FROM_KEYED);
}
bool done = false;
- MaybeObject* result_object =
- SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done);
+ Handle<Object> result_object = SetPropertyViaPrototypes(
+ object, name, value, attributes, strict_mode, &done);
if (done) return result_object;
// Add a new real property.
- return AddProperty(name, value, attributes, strict_mode,
- MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK,
- OPTIMAL_REPRESENTATION, mode);
+ return AddProperty(object, name, value, attributes, strict_mode);
}
-MaybeObject* JSObject::ReplaceSlowProperty(Name* name,
- Object* value,
- PropertyAttributes attributes) {
- NameDictionary* dictionary = property_dictionary();
- int old_index = dictionary->FindEntry(name);
+static void ReplaceSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ NameDictionary* dictionary = object->property_dictionary();
+ int old_index = dictionary->FindEntry(*name);
int new_enumeration_index = 0; // 0 means "Use the next available index."
if (old_index != -1) {
// All calls to ReplaceSlowProperty have had all transitions removed.
@@ -2153,7 +2232,7 @@ MaybeObject* JSObject::ReplaceSlowProperty(Name* name,
}
PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
- return SetNormalizedProperty(name, value, new_details);
+ JSObject::SetNormalizedProperty(object, name, value, new_details);
}
@@ -2219,6 +2298,13 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
}
}
+
+  // The array may not be moved during GC, but its recorded size still has
+  // to be adjusted.
+ HeapProfiler* profiler = heap->isolate()->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->UpdateObjectSizeEvent(elms->address(), elms->Size());
+ }
}
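
The new profiler hook exists because RightTrimFixedArray shrinks the array in place: the object keeps its address, so an allocation tracker that recorded the original size would keep over-reporting it unless told about the trim. A toy version of that bookkeeping (invented types, not the profiler's real API):

    #include <cassert>
    #include <cstddef>
    #include <map>

    // Tracker that remembers the size recorded for each address, like the
    // heap profiler's allocation tracking above.
    std::map<const void*, std::size_t> g_tracked_sizes;

    struct ToyArray {
      std::size_t length;
      int elements[8];
    };

    // Trim in place: the array keeps its address, so the tracker's entry
    // must be refreshed or it would keep reporting the old, larger size.
    void RightTrim(ToyArray* array, std::size_t to_trim) {
      assert(to_trim <= array->length);
      array->length -= to_trim;  // the freed tail would become filler in V8
      g_tracked_sizes[array] =
          sizeof(std::size_t) + array->length * sizeof(int);
    }

    int main() {
      ToyArray a{8, {0}};
      g_tracked_sizes[&a] = sizeof(a);
      RightTrim(&a, 3);
      assert(a.length == 5);
      assert(g_tracked_sizes[&a] < sizeof(a));
      return 0;
    }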
@@ -2275,28 +2361,27 @@ bool Map::InstancesNeedRewriting(Map* target,
// to temporarily store the inobject properties.
// * If there are properties left in the backing store, install the backing
// store.
-MaybeObject* JSObject::MigrateToMap(Map* new_map) {
- Heap* heap = GetHeap();
- Map* old_map = map();
+void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<Map> old_map(object->map());
int number_of_fields = new_map->NumberOfFields();
int inobject = new_map->inobject_properties();
int unused = new_map->unused_property_fields();
- // Nothing to do if no functions were converted to fields.
+ // Nothing to do if no functions were converted to fields and no smis were
+ // converted to doubles.
if (!old_map->InstancesNeedRewriting(
- new_map, number_of_fields, inobject, unused)) {
- set_map(new_map);
- return this;
+ *new_map, number_of_fields, inobject, unused)) {
+ object->set_map(*new_map);
+ return;
}
int total_size = number_of_fields + unused;
int external = total_size - inobject;
- FixedArray* array;
- MaybeObject* maybe_array = heap->AllocateFixedArray(total_size);
- if (!maybe_array->To(&array)) return maybe_array;
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size);
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
- DescriptorArray* new_descriptors = new_map->instance_descriptors();
+ Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
+ Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors());
int descriptors = new_map->NumberOfOwnDescriptors();
for (int i = 0; i < descriptors; i++) {
@@ -2309,69 +2394,72 @@ MaybeObject* JSObject::MigrateToMap(Map* new_map) {
}
ASSERT(old_details.type() == CONSTANT ||
old_details.type() == FIELD);
- Object* value = old_details.type() == CONSTANT
+ Object* raw_value = old_details.type() == CONSTANT
? old_descriptors->GetValue(i)
- : RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
+ : object->RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
+ Handle<Object> value(raw_value, isolate);
if (FLAG_track_double_fields &&
!old_details.representation().IsDouble() &&
details.representation().IsDouble()) {
- if (old_details.representation().IsNone()) value = Smi::FromInt(0);
- // Objects must be allocated in the old object space, since the
- // overall number of HeapNumbers needed for the conversion might
- // exceed the capacity of new space, and we would fail repeatedly
- // trying to migrate the instance.
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(heap, details.representation(), TENURED);
- if (!maybe_storage->To(&value)) return maybe_storage;
+ if (old_details.representation().IsNone()) {
+ value = handle(Smi::FromInt(0), isolate);
+ }
+ value = NewStorageFor(isolate, value, details.representation());
}
ASSERT(!(FLAG_track_double_fields &&
details.representation().IsDouble() &&
value->IsSmi()));
int target_index = new_descriptors->GetFieldIndex(i) - inobject;
if (target_index < 0) target_index += total_size;
- array->set(target_index, value);
+ array->set(target_index, *value);
}
- // From here on we cannot fail anymore.
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
int limit = Min(inobject, number_of_fields);
for (int i = 0; i < limit; i++) {
- FastPropertyAtPut(i, array->get(external + i));
+ object->FastPropertyAtPut(i, array->get(external + i));
}
// Create filler object past the new instance size.
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- Address address = this->address() + new_instance_size;
- heap->CreateFillerObjectAt(address, instance_size_delta);
+ Address address = object->address() + new_instance_size;
+ isolate->heap()->CreateFillerObjectAt(address, instance_size_delta);
// If there are properties in the new backing store, trim it to the correct
// size and install the backing store into the object.
if (external > 0) {
- RightTrimFixedArray<FROM_MUTATOR>(heap, array, inobject);
- set_properties(array);
+ RightTrimFixedArray<FROM_MUTATOR>(isolate->heap(), *array, inobject);
+ object->set_properties(*array);
}
- set_map(new_map);
-
- return this;
+ object->set_map(*new_map);
}
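
MigrateToMap is now split into a fallible phase that performs all allocations up front and an infallible phase under DisallowHeapAllocation, so no GC can move objects while fields are written back through raw pointers. A toy RAII guard in the same spirit; the implementation is invented, only the shape of the idiom matches:

    #include <cassert>
    #include <cstddef>
    #include <new>

    static int g_no_alloc_depth = 0;

    // Entering a scope bumps the depth; allocation asserts the depth is zero.
    struct DisallowAllocationGuard {
      DisallowAllocationGuard() { ++g_no_alloc_depth; }
      ~DisallowAllocationGuard() { --g_no_alloc_depth; }
    };

    void* HeapAllocate(std::size_t n) {
      assert(g_no_alloc_depth == 0 && "allocation inside a no-alloc scope");
      return ::operator new(n);
    }

    int main() {
      void* p = HeapAllocate(16);  // fine: no guard is live
      ::operator delete(p);
      {
        DisallowAllocationGuard guard;
        // HeapAllocate(16) here would trip the assert.
      }
      p = HeapAllocate(16);  // fine again after the guard is destroyed
      ::operator delete(p);
      return 0;
    }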
-MaybeObject* JSObject::GeneralizeFieldRepresentation(
- int modify_index,
- Representation new_representation,
- StoreMode store_mode) {
- Map* new_map;
- MaybeObject* maybe_new_map = map()->GeneralizeRepresentation(
- modify_index, new_representation, store_mode);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- if (map() == new_map) return this;
+Handle<TransitionArray> Map::AddTransition(Handle<Map> map,
+ Handle<Name> key,
+ Handle<Map> target,
+ SimpleTransitionFlag flag) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->AddTransition(*key, *target, flag),
+ TransitionArray);
+}
+
- return MigrateToMap(new_map);
+void JSObject::GeneralizeFieldRepresentation(Handle<JSObject> object,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode) {
+ Handle<Map> new_map = Map::GeneralizeRepresentation(
+ handle(object->map()), modify_index, new_representation, store_mode);
+ if (object->map() == *new_map) return;
+ return MigrateToMap(object, new_map);
}
@@ -2385,14 +2473,12 @@ int Map::NumberOfFields() {
}
-MaybeObject* Map::CopyGeneralizeAllRepresentations(
- int modify_index,
- StoreMode store_mode,
- PropertyAttributes attributes,
- const char* reason) {
- Map* new_map;
- MaybeObject* maybe_map = this->Copy();
- if (!maybe_map->To(&new_map)) return maybe_map;
+Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
+ int modify_index,
+ StoreMode store_mode,
+ PropertyAttributes attributes,
+ const char* reason) {
+ Handle<Map> new_map = Copy(map);
DescriptorArray* descriptors = new_map->instance_descriptors();
descriptors->InitializeRepresentations(Representation::Tagged());
@@ -2414,7 +2500,7 @@ MaybeObject* Map::CopyGeneralizeAllRepresentations(
}
if (FLAG_trace_generalization) {
- PrintGeneralization(stdout, reason, modify_index,
+ map->PrintGeneralization(stdout, reason, modify_index,
new_map->NumberOfOwnDescriptors(),
new_map->NumberOfOwnDescriptors(),
details.type() == CONSTANT && store_mode == FORCE_FIELD,
@@ -2562,11 +2648,11 @@ Map* Map::FindLastMatchMap(int verbatim,
// - If |updated| == |split_map|, |updated| is in the expected state. Return it.
// - Otherwise, invalidate the outdated transition target from |updated|, and
// replace its transition tree with a new branch for the updated descriptors.
-MaybeObject* Map::GeneralizeRepresentation(int modify_index,
- Representation new_representation,
- StoreMode store_mode) {
- Map* old_map = this;
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
+Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode) {
+ Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
Representation old_representation = old_details.representation();
@@ -2582,37 +2668,37 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
}
int descriptors = old_map->NumberOfOwnDescriptors();
- Map* root_map = old_map->FindRootMap();
+ Handle<Map> root_map(old_map->FindRootMap());
// Check the state of the root map.
- if (!old_map->EquivalentToForTransition(root_map)) {
- return CopyGeneralizeAllRepresentations(
- modify_index, store_mode, old_details.attributes(), "not equivalent");
+ if (!old_map->EquivalentToForTransition(*root_map)) {
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ old_details.attributes(), "not equivalent");
}
int verbatim = root_map->NumberOfOwnDescriptors();
if (store_mode != ALLOW_AS_CONSTANT && modify_index < verbatim) {
- return CopyGeneralizeAllRepresentations(
- modify_index, store_mode,
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
old_details.attributes(), "root modification");
}
- Map* updated = root_map->FindUpdatedMap(
- verbatim, descriptors, old_descriptors);
- if (updated == NULL) {
- return CopyGeneralizeAllRepresentations(
- modify_index, store_mode, old_details.attributes(), "incompatible");
+ Map* raw_updated = root_map->FindUpdatedMap(
+ verbatim, descriptors, *old_descriptors);
+ if (raw_updated == NULL) {
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ old_details.attributes(), "incompatible");
}
- DescriptorArray* updated_descriptors = updated->instance_descriptors();
+ Handle<Map> updated(raw_updated);
+ Handle<DescriptorArray> updated_descriptors(updated->instance_descriptors());
int valid = updated->NumberOfOwnDescriptors();
// Directly change the map if the target map is more general. Ensure that the
// target type of the modify_index is a FIELD, unless we are migrating.
if (updated_descriptors->IsMoreGeneralThan(
- verbatim, valid, descriptors, old_descriptors) &&
+ verbatim, valid, descriptors, *old_descriptors) &&
(store_mode == ALLOW_AS_CONSTANT ||
updated_descriptors->GetDetails(modify_index).type() == FIELD)) {
Representation updated_representation =
@@ -2620,10 +2706,9 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
if (new_representation.fits_into(updated_representation)) return updated;
}
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = updated_descriptors->Merge(
- verbatim, valid, descriptors, modify_index, store_mode, old_descriptors);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::Merge(
+ updated_descriptors, verbatim, valid, descriptors, modify_index,
+ store_mode, old_descriptors);
ASSERT(store_mode == ALLOW_AS_CONSTANT ||
new_descriptors->GetDetails(modify_index).type() == FIELD);
@@ -2635,8 +2720,8 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
new_descriptors->SetRepresentation(modify_index, updated_representation);
}
- Map* split_map = root_map->FindLastMatchMap(
- verbatim, descriptors, new_descriptors);
+ Handle<Map> split_map(root_map->FindLastMatchMap(
+ verbatim, descriptors, *new_descriptors));
int split_descriptors = split_map->NumberOfOwnDescriptors();
// This is shadowed by |updated_descriptors| being more general than
@@ -2645,28 +2730,20 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
int descriptor = split_descriptors;
split_map->DeprecateTarget(
- old_descriptors->GetKey(descriptor), new_descriptors);
+ old_descriptors->GetKey(descriptor), *new_descriptors);
if (FLAG_trace_generalization) {
- PrintGeneralization(
+ old_map->PrintGeneralization(
stdout, "", modify_index, descriptor, descriptors,
old_descriptors->GetDetails(modify_index).type() == CONSTANT &&
store_mode == FORCE_FIELD,
old_representation, updated_representation);
}
- Map* new_map = split_map;
// Add missing transitions.
+ Handle<Map> new_map = split_map;
for (; descriptor < descriptors; descriptor++) {
- MaybeObject* maybe_map = new_map->CopyInstallDescriptors(
- descriptor, new_descriptors);
- if (!maybe_map->To(&new_map)) {
- // Create a handle for the last created map to ensure it stays alive
- // during GC. Its descriptor array is too large, but it will be
- // overwritten during retry anyway.
- Handle<Map>(new_map);
- return maybe_map;
- }
+ new_map = Map::CopyInstallDescriptors(new_map, descriptor, new_descriptors);
new_map->set_migration_target(true);
}
@@ -2675,6 +2752,21 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
}
+// Generalize the representation of all FIELD descriptors.
+Handle<Map> Map::GeneralizeAllFieldRepresentations(
+ Handle<Map> map,
+ Representation new_representation) {
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() == FIELD) {
+ map = GeneralizeRepresentation(map, i, new_representation, FORCE_FIELD);
+ }
+ }
+ return map;
+}
+
+
Map* Map::CurrentMapForDeprecated() {
DisallowHeapAllocation no_allocation;
if (!is_deprecated()) return this;
@@ -2703,94 +2795,66 @@ Map* Map::CurrentMapForDeprecated() {
}
-MaybeObject* JSObject::SetPropertyWithInterceptor(
- Name* name,
- Object* value,
+Handle<Object> JSObject::SetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return value;
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> this_handle(this);
- Handle<String> name_handle(String::cast(name));
- Handle<Object> value_handle(value, isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
+ Isolate* isolate = object->GetIsolate();
+ Handle<String> name_string = Handle<String>::cast(name);
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
- LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name));
- PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-set", *object, *name));
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *object, *object);
v8::NamedPropertySetterCallback setter =
v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter());
- Handle<Object> value_unhole(value->IsTheHole() ?
- isolate->heap()->undefined_value() :
- value,
- isolate);
+ Handle<Object> value_unhole = value->IsTheHole()
+ ? Handle<Object>(isolate->factory()->undefined_value()) : value;
v8::Handle<v8::Value> result = args.Call(setter,
- v8::Utils::ToLocal(name_handle),
+ v8::Utils::ToLocal(name_string),
v8::Utils::ToLocal(value_unhole));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *value_handle;
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (!result.IsEmpty()) return value;
}
- MaybeObject* raw_result =
- this_handle->SetPropertyPostInterceptor(*name_handle,
- *value_handle,
- attributes,
- strict_mode);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
+ Handle<Object> result =
+ SetPropertyPostInterceptor(object, name, value, attributes, strict_mode);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return result;
}
Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
- Handle<Name> key,
+ Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetProperty(*key, *value, attributes, strict_mode),
- Object);
-}
-
-
-MaybeObject* JSReceiver::SetPropertyOrFail(
- Handle<JSReceiver> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
- CALL_HEAP_FUNCTION_PASS_EXCEPTION(
- object->GetIsolate(),
- object->SetProperty(*key, *value, attributes, strict_mode, store_mode));
-}
-
-
-MaybeObject* JSReceiver::SetProperty(Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
- LookupResult result(GetIsolate());
- LocalLookup(name, &result, true);
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
+ LookupResult result(object->GetIsolate());
+ object->LocalLookup(*name, &result, true);
if (!result.IsFound()) {
- map()->LookupTransition(JSObject::cast(this), name, &result);
+ object->map()->LookupTransition(JSObject::cast(*object), *name, &result);
}
- return SetProperty(&result, name, value, attributes, strict_mode, store_mode);
+ return SetProperty(object, &result, name, value, attributes, strict_mode,
+ store_mode);
}
-MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
- Name* name,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::SetPropertyWithCallback(Handle<JSObject> object,
+ Handle<Object> structure,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<JSObject> holder,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -2798,26 +2862,27 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->foreign_address());
- MaybeObject* obj = (callback->setter)(
- isolate, this, value, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (obj->IsFailure()) return obj;
- return *value_handle;
+ Handle<Foreign>::cast(structure)->foreign_address());
+ CALL_AND_RETRY_OR_DIE(isolate,
+ (callback->setter)(
+ isolate, *object, *value, callback->data),
+ break,
+ return Handle<Object>());
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
if (structure->IsExecutableAccessorInfo()) {
// api style callbacks
- ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
- if (!data->IsCompatibleReceiver(this)) {
- Handle<Object> name_handle(name, isolate);
- Handle<Object> receiver_handle(this, isolate);
- Handle<Object> args[2] = { name_handle, receiver_handle };
+ ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(*structure);
+ if (!data->IsCompatibleReceiver(*object)) {
+ Handle<Object> args[2] = { name, object };
Handle<Object> error =
isolate->factory()->NewTypeError("incompatible_method_receiver",
HandleVector(args,
ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return value;
@@ -2825,32 +2890,33 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
v8::AccessorSetterCallback call_fun =
v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
- Handle<String> key(String::cast(name));
- LOG(isolate, ApiNamedPropertyAccess("store", this, name));
+ Handle<String> key = Handle<String>::cast(name);
+ LOG(isolate, ApiNamedPropertyAccess("store", *object, *name));
PropertyCallbackArguments args(
- isolate, data->data(), this, JSObject::cast(holder));
+ isolate, data->data(), *object, JSObject::cast(*holder));
args.Call(call_fun,
v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
+ v8::Utils::ToLocal(value));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
if (structure->IsAccessorPair()) {
- Object* setter = AccessorPair::cast(structure)->setter();
+ Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
if (setter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
+ return SetPropertyWithDefinedSetter(
+ object, Handle<JSReceiver>::cast(setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
}
- Handle<Name> key(name);
- Handle<Object> holder_handle(holder, isolate);
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ Handle<Object> args[2] = { name, holder };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("no_setter_in_callback",
+ HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
@@ -2860,32 +2926,33 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
}
UNREACHABLE();
- return NULL;
+ return Handle<Object>();
}
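
SetPropertyWithCallback dispatches on the kind of structure backing the property: a native Foreign setter, an ExecutableAccessorInfo callback, or a JavaScript AccessorPair whose setter half may be absent (the no_setter_in_callback case). A compact model of that dispatch using invented types, with std::variant standing in for V8's tagged dispatch:

    #include <cassert>
    #include <functional>
    #include <optional>
    #include <variant>

    using NativeSetter = std::function<void(double)>;

    struct AccessorPair {
      std::optional<NativeSetter> setter;  // absent: "no_setter_in_callback"
    };

    using Structure = std::variant<NativeSetter, AccessorPair>;

    // Returns false when the structure provides no setter (the strict-mode
    // TypeError case above); true when the value was stored.
    bool SetWithCallback(const Structure& structure, double value) {
      if (const NativeSetter* fn = std::get_if<NativeSetter>(&structure)) {
        (*fn)(value);
        return true;
      }
      const AccessorPair& pair = std::get<AccessorPair>(structure);
      if (!pair.setter) return false;
      (*pair.setter)(value);
      return true;
    }

    int main() {
      double stored = 0;
      Structure native = NativeSetter([&](double v) { stored = v; });
      assert(SetWithCallback(native, 4.2) && stored == 4.2);

      Structure pair_without_setter = AccessorPair{};
      assert(!SetWithCallback(pair_without_setter, 1.0));
      return 0;
    }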
-MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value) {
- Isolate* isolate = GetIsolate();
- Handle<Object> value_handle(value, isolate);
- Handle<JSReceiver> fun(setter, isolate);
- Handle<JSReceiver> self(this, isolate);
+Handle<Object> JSReceiver::SetPropertyWithDefinedSetter(
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> setter,
+ Handle<Object> value) {
+ Isolate* isolate = object->GetIsolate();
+
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
// Handle stepping into a setter if step into is active.
// TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->StepInActive() && fun->IsJSFunction()) {
+ if (debug->StepInActive() && setter->IsJSFunction()) {
debug->HandleStepIn(
- Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
+ Handle<JSFunction>::cast(setter), Handle<Object>::null(), 0, false);
}
#endif
+
bool has_pending_exception;
- Handle<Object> argv[] = { value_handle };
+ Handle<Object> argv[] = { value };
Execution::Call(
- isolate, fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
+ isolate, setter, object, ARRAY_SIZE(argv), argv, &has_pending_exception);
// Check for pending exception and return the result.
- if (has_pending_exception) return Failure::Exception();
- return *value_handle;
+ if (has_pending_exception) return Handle<Object>();
+ return value;
}
@@ -2899,14 +2966,16 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
pt != heap->null_value();
pt = pt->GetPrototype(GetIsolate())) {
if (pt->IsJSProxy()) {
- String* name;
- MaybeObject* maybe = heap->Uint32ToString(index);
- if (!maybe->To<String>(&name)) {
- *found = true; // Force abort
- return maybe;
- }
- return JSProxy::cast(pt)->SetPropertyViaPrototypesWithHandler(
- this, name, value, NONE, strict_mode, found);
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSProxy> proxy(JSProxy::cast(pt));
+ Handle<JSObject> self(this, isolate);
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ Handle<Object> value_handle(value, isolate);
+ Handle<Object> result = JSProxy::SetPropertyViaPrototypesWithHandler(
+ proxy, self, name, value_handle, NONE, strict_mode, found);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
if (!JSObject::cast(pt)->HasDictionaryElements()) {
continue;
@@ -2918,11 +2987,16 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
*found = true;
- return SetElementWithCallback(dictionary->ValueAt(entry),
- index,
- value,
- JSObject::cast(pt),
- strict_mode);
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSObject> self(this, isolate);
+ Handle<Object> structure(dictionary->ValueAt(entry), isolate);
+ Handle<Object> value_handle(value, isolate);
+ Handle<JSObject> holder(JSObject::cast(pt));
+ Handle<Object> result = SetElementWithCallback(
+ self, structure, index, value_handle, holder, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
}
@@ -2930,21 +3004,21 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
return heap->the_hole_value();
}
-MaybeObject* JSObject::SetPropertyViaPrototypes(
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool* done) {
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
+
+Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool* done) {
+ Isolate* isolate = object->GetIsolate();
*done = false;
  // We could not find a local property, so let's check whether there is an
// accessor that wants to handle the property, or whether the property is
// read-only on the prototype chain.
LookupResult result(isolate);
- LookupRealNamedPropertyInPrototypes(name, &result);
+ object->LookupRealNamedPropertyInPrototypes(*name, &result);
if (result.IsFound()) {
switch (result.type()) {
case NORMAL:
@@ -2955,19 +3029,21 @@ MaybeObject* JSObject::SetPropertyViaPrototypes(
case INTERCEPTOR: {
PropertyAttributes attr =
result.holder()->GetPropertyAttributeWithInterceptor(
- this, name, true);
+ *object, *name, true);
*done = !!(attr & READ_ONLY);
break;
}
case CALLBACKS: {
if (!FLAG_es5_readonly && result.IsReadOnly()) break;
*done = true;
- return SetPropertyWithCallback(result.GetCallbackObject(),
- name, value, result.holder(), strict_mode);
+ Handle<Object> callback_object(result.GetCallbackObject(), isolate);
+ return SetPropertyWithCallback(object, callback_object, name, value,
+ handle(result.holder()), strict_mode);
}
case HANDLER: {
- return result.proxy()->SetPropertyViaPrototypesWithHandler(
- this, name, value, attributes, strict_mode, done);
+ Handle<JSProxy> proxy(result.proxy());
+ return JSProxy::SetPropertyViaPrototypesWithHandler(
+ proxy, object, name, value, attributes, strict_mode, done);
}
case TRANSITION:
case NONEXISTENT:
@@ -2980,12 +3056,13 @@ MaybeObject* JSObject::SetPropertyViaPrototypes(
if (!FLAG_es5_readonly) *done = false;
if (*done) {
if (strict_mode == kNonStrictMode) return value;
- Handle<Object> args[] = { Handle<Object>(name, isolate),
- Handle<Object>(this, isolate)};
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
+ Handle<Object> args[] = { name, object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- return heap->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
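
SetPropertyViaPrototypes decides, before a new own property is added, whether something up the prototype chain intercepts or forbids the store: an accessor, a proxy handler, or a read-only data property. A toy chain walk (invented types) showing just the read-only check that feeds *done:

    #include <cassert>
    #include <map>
    #include <string>

    struct ProtoObject {
      std::map<std::string, bool> properties;  // name -> READ_ONLY attribute
      ProtoObject* prototype = nullptr;
    };

    // Walk the prototype chain; report whether the first holder of |name|
    // marks it read-only (which would make a strict-mode store throw).
    bool ReadOnlyInPrototypes(const ProtoObject* obj, const std::string& name) {
      for (const ProtoObject* p = obj->prototype; p != nullptr;
           p = p->prototype) {
        auto it = p->properties.find(name);
        if (it != p->properties.end()) return it->second;
      }
      return false;
    }

    int main() {
      ProtoObject proto;
      proto.properties["x"] = true;  // x is read-only on the prototype
      ProtoObject obj;
      obj.prototype = &proto;
      assert(ReadOnlyInPrototypes(&obj, "x"));
      assert(!ReadOnlyInPrototypes(&obj, "y"));
      return 0;
    }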
@@ -3340,14 +3417,15 @@ void JSObject::LookupRealNamedPropertyInPrototypes(Name* name,
// We only need to deal with CALLBACKS and INTERCEPTORS
-MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
+Handle<Object> JSObject::SetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
LookupResult* result,
- Name* name,
- Object* value,
+ Handle<Name> name,
+ Handle<Object> value,
bool check_prototype,
StrictModeFlag strict_mode) {
if (check_prototype && !result->IsProperty()) {
- LookupRealNamedPropertyInPrototypes(name, result);
+ object->LookupRealNamedPropertyInPrototypes(*name, result);
}
if (result->IsProperty()) {
@@ -3356,21 +3434,23 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
case CALLBACKS: {
Object* obj = result->GetCallbackObject();
if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
+ Handle<AccessorInfo> info(AccessorInfo::cast(obj));
if (info->all_can_write()) {
- return SetPropertyWithCallback(result->GetCallbackObject(),
+ return SetPropertyWithCallback(object,
+ info,
name,
value,
- result->holder(),
+ handle(result->holder()),
strict_mode);
}
} else if (obj->IsAccessorPair()) {
- AccessorPair* pair = AccessorPair::cast(obj);
+ Handle<AccessorPair> pair(AccessorPair::cast(obj));
if (pair->all_can_read()) {
- return SetPropertyWithCallback(result->GetCallbackObject(),
+ return SetPropertyWithCallback(object,
+ pair,
name,
value,
- result->holder(),
+ handle(result->holder()),
strict_mode);
}
}
@@ -3379,10 +3459,11 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
case INTERCEPTOR: {
      // Try to look up real named properties. Note that the only properties
      // that can be set this way are callbacks marked as ALL_CAN_WRITE on
      // the prototype chain.
- LookupResult r(GetIsolate());
- LookupRealNamedProperty(name, &r);
+ LookupResult r(object->GetIsolate());
+ object->LookupRealNamedProperty(*name, &r);
if (r.IsProperty()) {
- return SetPropertyWithFailedAccessCheck(&r,
+ return SetPropertyWithFailedAccessCheck(object,
+ &r,
name,
value,
check_prototype,
@@ -3397,42 +3478,38 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
}
}
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> value_handle(value, isolate);
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
+ Isolate* isolate = object->GetIsolate();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
-MaybeObject* JSReceiver::SetProperty(LookupResult* result,
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
+Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
+ LookupResult* result,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
if (result->IsHandler()) {
- return result->proxy()->SetPropertyWithHandler(
- this, key, value, attributes, strict_mode);
+ return JSProxy::SetPropertyWithHandler(handle(result->proxy()),
+ object, key, value, attributes, strict_mode);
} else {
- return JSObject::cast(this)->SetPropertyForResult(
+ return JSObject::SetPropertyForResult(Handle<JSObject>::cast(object),
result, key, value, attributes, strict_mode, store_mode);
}
}
-bool JSProxy::HasPropertyWithHandler(Name* name_raw) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> receiver(this, isolate);
- Handle<Object> name(name_raw, isolate);
+bool JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name) {
+ Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) return false;
Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
+ Handle<Object> result = proxy->CallTrap(
"has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return false;
@@ -3440,58 +3517,51 @@ bool JSProxy::HasPropertyWithHandler(Name* name_raw) {
}
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
- JSReceiver* receiver_raw,
- Name* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw, isolate);
- Handle<Object> value(value_raw, isolate);
+Handle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return *value;
+ if (name->IsSymbol()) return value;
Handle<Object> args[] = { receiver, name, value };
- CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ proxy->CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
+ if (isolate->has_pending_exception()) return Handle<Object>();
- return *value;
+ return value;
}
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
- JSReceiver* receiver_raw,
- Name* name_raw,
- Object* value_raw,
+Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done) {
- Isolate* isolate = GetIsolate();
- Handle<JSProxy> proxy(this);
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Name> name(name_raw);
- Handle<Object> value(value_raw, isolate);
- Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy.
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy.
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) {
*done = false;
- return isolate->heap()->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
*done = true; // except where redefined...
Handle<Object> args[] = { name };
Handle<Object> result = proxy->CallTrap(
"getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ if (isolate->has_pending_exception()) return Handle<Object>();
if (result->IsUndefined()) {
*done = false;
- return isolate->heap()->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
// Emulate [[GetProperty]] semantics for proxies.
@@ -3500,7 +3570,7 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<Object> desc = Execution::Call(
isolate, isolate->to_complete_property_descriptor(), result,
ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
+ if (has_pending_exception) return Handle<Object>();
// [[GetProperty]] requires to check that all properties are configurable.
Handle<String> configurable_name =
@@ -3517,7 +3587,8 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<Object> args[] = { handler, trap, name };
Handle<Object> error = isolate->factory()->NewTypeError(
"proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
ASSERT(configurable->IsTrue());
@@ -3538,12 +3609,13 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
ASSERT(!isolate->has_pending_exception());
ASSERT(writable->IsTrue() || writable->IsFalse());
*done = writable->IsFalse();
- if (!*done) return GetHeap()->the_hole_value();
- if (strict_mode == kNonStrictMode) return *value;
+ if (!*done) return isolate->factory()->the_hole_value();
+ if (strict_mode == kNonStrictMode) return value;
Handle<Object> args[] = { name, receiver };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// We have an AccessorDescriptor.
@@ -3553,15 +3625,16 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
ASSERT(!isolate->has_pending_exception());
if (!setter->IsUndefined()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return receiver->SetPropertyWithDefinedSetter(
- JSReceiver::cast(*setter), *value);
+ return SetPropertyWithDefinedSetter(
+ receiver, Handle<JSReceiver>::cast(setter), value);
}
- if (strict_mode == kNonStrictMode) return *value;
+ if (strict_mode == kNonStrictMode) return value;
Handle<Object> args2[] = { name, proxy };
Handle<Object> error = isolate->factory()->NewTypeError(
"no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
@@ -3726,44 +3799,74 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
}
-void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->AllocateStorageForMap(*map));
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Map> MapAsElementsKind(Handle<Map> map, ElementsKind kind) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(), map->AsElementsKind(kind), Map);
}
-void JSObject::MigrateInstance(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->MigrateInstance());
+void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
+ ASSERT(object->map()->inobject_properties() == map->inobject_properties());
+ ElementsKind obj_kind = object->map()->elements_kind();
+ ElementsKind map_kind = map->elements_kind();
+ if (map_kind != obj_kind) {
+ ElementsKind to_kind = map_kind;
+ if (IsMoreGeneralElementsKindTransition(map_kind, obj_kind) ||
+ IsDictionaryElementsKind(obj_kind)) {
+ to_kind = obj_kind;
+ }
+ if (IsDictionaryElementsKind(to_kind)) {
+ NormalizeElements(object);
+ } else {
+ TransitionElementsKind(object, to_kind);
+ }
+ map = MapAsElementsKind(map, to_kind);
+ }
+ int total_size =
+ map->NumberOfOwnDescriptors() + map->unused_property_fields();
+ int out_of_object = total_size - map->inobject_properties();
+ if (out_of_object != object->properties()->length()) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<FixedArray> new_properties = isolate->factory()->CopySizeFixedArray(
+ handle(object->properties()), out_of_object);
+ object->set_properties(*new_properties);
+ }
+ object->set_map(*map);
}
-Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->MigrateInstance(),
- Object);
+void JSObject::MigrateInstance(Handle<JSObject> object) {
+ // Converting any field to the most specific type will cause the
+ // GeneralizeFieldRepresentation algorithm to create the most general existing
+ // transition that matches the object. This achieves what is needed.
+ Handle<Map> original_map(object->map());
+ GeneralizeFieldRepresentation(
+ object, 0, Representation::None(), ALLOW_AS_CONSTANT);
+ if (FLAG_trace_migration) {
+ object->PrintInstanceMigration(stdout, *original_map, object->map());
+ }
}
-Handle<Map> Map::GeneralizeRepresentation(Handle<Map> map,
- int modify_index,
- Representation representation,
- StoreMode store_mode) {
- CALL_HEAP_FUNCTION(
- map->GetIsolate(),
- map->GeneralizeRepresentation(modify_index, representation, store_mode),
- Map);
+Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
+ Map* new_map = object->map()->CurrentMapForDeprecated();
+ if (new_map == NULL) return Handle<Object>();
+ Handle<Map> original_map(object->map());
+ JSObject::MigrateToMap(object, handle(new_map));
+ if (FLAG_trace_migration) {
+ object->PrintInstanceMigration(stdout, *original_map, object->map());
+ }
+ return object;
}
-static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
- Map* transition_map = lookup->GetTransitionTarget();
+Handle<Object> JSObject::SetPropertyUsingTransition(
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<Map> transition_map(lookup->GetTransitionTarget());
int descriptor = transition_map->LastAdded();
DescriptorArray* descriptors = transition_map->instance_descriptors();
@@ -3773,8 +3876,8 @@ static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup,
// AddProperty will either normalize the object, or create a new fast copy
// of the map. If we get a fast copy of the map, all field representations
// will be tagged since the transition is omitted.
- return lookup->holder()->AddProperty(
- *name, *value, attributes, kNonStrictMode,
+ return JSObject::AddProperty(
+ object, name, value, attributes, kNonStrictMode,
JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED,
JSReceiver::OMIT_EXTENSIBILITY_CHECK,
JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION);
@@ -3785,45 +3888,41 @@ static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup,
// (value->IsUninitialized) as constant.
if (details.type() == CONSTANT &&
descriptors->GetValue(descriptor) == *value) {
- lookup->holder()->set_map(transition_map);
- return *value;
+ object->set_map(*transition_map);
+ return value;
}
Representation representation = details.representation();
if (!value->FitsRepresentation(representation) ||
details.type() == CONSTANT) {
- MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
+ transition_map = Map::GeneralizeRepresentation(transition_map,
descriptor, value->OptimalRepresentation(), FORCE_FIELD);
- if (!maybe_map->To(&transition_map)) return maybe_map;
Object* back = transition_map->GetBackPointer();
if (back->IsMap()) {
- MaybeObject* maybe_failure =
- lookup->holder()->MigrateToMap(Map::cast(back));
- if (maybe_failure->IsFailure()) return maybe_failure;
+ MigrateToMap(object, handle(Map::cast(back)));
}
descriptors = transition_map->instance_descriptors();
representation = descriptors->GetDetails(descriptor).representation();
}
int field_index = descriptors->GetFieldIndex(descriptor);
- return lookup->holder()->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index, representation);
+ AddFastPropertyUsingMap(
+ object, transition_map, name, value, field_index, representation);
+ return value;
}
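// Illustrative walk-through of the generalization branch above, with
// invented values: transition_map records descriptor 3 as Representation
// Smi, but the incoming value is 1.5 (a HeapNumber), so FitsRepresentation
// fails and
//   1) Map::GeneralizeRepresentation widens descriptor 3 to Double
//      (FORCE_FIELD also turns a CONSTANT into a real field);
//   2) if the widened map has a Map back pointer, the object migrates to it
//      first so its layout matches before the store;
//   3) the store completes through AddFastPropertyUsingMap.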
-static MaybeObject* SetPropertyToField(LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value) {
+static void SetPropertyToField(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value) {
Representation representation = lookup->representation();
if (!value->FitsRepresentation(representation) ||
lookup->type() == CONSTANT) {
- MaybeObject* maybe_failure =
- lookup->holder()->GeneralizeFieldRepresentation(
- lookup->GetDescriptorIndex(),
- value->OptimalRepresentation(),
- FORCE_FIELD);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::GeneralizeFieldRepresentation(handle(lookup->holder()),
+ lookup->GetDescriptorIndex(),
+ value->OptimalRepresentation(),
+ FORCE_FIELD);
DescriptorArray* desc = lookup->holder()->map()->instance_descriptors();
int descriptor = lookup->GetDescriptorIndex();
representation = desc->GetDetails(descriptor).representation();
@@ -3833,199 +3932,182 @@ static MaybeObject* SetPropertyToField(LookupResult* lookup,
HeapNumber* storage = HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
lookup->GetFieldIndex().field_index()));
storage->set_value(value->Number());
- return *value;
+ return;
}
lookup->holder()->FastPropertyAtPut(
lookup->GetFieldIndex().field_index(), *value);
- return *value;
}
-static MaybeObject* ConvertAndSetLocalProperty(LookupResult* lookup,
- Name* name,
- Object* value,
- PropertyAttributes attributes) {
- JSObject* object = lookup->holder();
+static void ConvertAndSetLocalProperty(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<JSObject> object(lookup->holder());
if (object->TooManyFastProperties()) {
- MaybeObject* maybe_failure = object->NormalizeProperties(
- CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
}
if (!object->HasFastProperties()) {
- return object->ReplaceSlowProperty(name, value, attributes);
+ ReplaceSlowProperty(object, name, value, attributes);
+ return;
}
int descriptor_index = lookup->GetDescriptorIndex();
if (lookup->GetAttributes() == attributes) {
- MaybeObject* maybe_failure = object->GeneralizeFieldRepresentation(
- descriptor_index, Representation::Tagged(), FORCE_FIELD);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::GeneralizeFieldRepresentation(
+ object, descriptor_index, Representation::Tagged(), FORCE_FIELD);
} else {
- Map* map;
- MaybeObject* maybe_map = object->map()->CopyGeneralizeAllRepresentations(
+ Handle<Map> old_map(object->map());
+ Handle<Map> new_map = Map::CopyGeneralizeAllRepresentations(old_map,
descriptor_index, FORCE_FIELD, attributes, "attributes mismatch");
- if (!maybe_map->To(&map)) return maybe_map;
- MaybeObject* maybe_failure = object->MigrateToMap(map);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::MigrateToMap(object, new_map);
}
DescriptorArray* descriptors = object->map()->instance_descriptors();
int index = descriptors->GetDetails(descriptor_index).field_index();
- object->FastPropertyAtPut(index, value);
- return value;
+ object->FastPropertyAtPut(index, *value);
}
-static MaybeObject* SetPropertyToFieldWithAttributes(
- LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
+static void SetPropertyToFieldWithAttributes(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
if (lookup->GetAttributes() == attributes) {
- if (value->IsUninitialized()) return *value;
- return SetPropertyToField(lookup, name, value);
+ if (value->IsUninitialized()) return;
+ SetPropertyToField(lookup, name, value);
} else {
- return ConvertAndSetLocalProperty(lookup, *name, *value, attributes);
+ ConvertAndSetLocalProperty(lookup, name, value, attributes);
}
}
-MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
- Name* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode) {
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
+Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
+ Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
+ AssertNoContextChange ncc(isolate);
// Optimization for 2-byte strings often used as keys in a decompression
// dictionary. We internalize these short keys to avoid constantly
// reallocating them.
- if (name_raw->IsString() && !name_raw->IsInternalizedString() &&
- String::cast(name_raw)->length() <= 2) {
- Object* internalized_version;
- { MaybeObject* maybe_string_version =
- heap->InternalizeString(String::cast(name_raw));
- if (maybe_string_version->ToObject(&internalized_version)) {
- name_raw = String::cast(internalized_version);
- }
- }
+ if (name->IsString() && !name->IsInternalizedString() &&
+ Handle<String>::cast(name)->length() <= 2) {
+ name = isolate->factory()->InternalizeString(Handle<String>::cast(name));
}
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(
- lookup, name_raw, value_raw, true, strict_mode);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(object, lookup, name, value,
+ true, strict_mode);
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetPropertyForResult(
- lookup, name_raw, value_raw, attributes, strict_mode, store_mode);
+ return SetPropertyForResult(Handle<JSObject>::cast(proto),
+ lookup, name, value, attributes, strict_mode, store_mode);
}
- ASSERT(!lookup->IsFound() || lookup->holder() == this ||
+ ASSERT(!lookup->IsFound() || lookup->holder() == *object ||
lookup->holder()->map()->is_hidden_prototype());
- // From this point on everything needs to be handlified, because
- // SetPropertyViaPrototypes might call back into JavaScript.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<Name> name(name_raw);
- Handle<Object> value(value_raw, isolate);
-
- if (!lookup->IsProperty() && !self->IsJSContextExtensionObject()) {
+ if (!lookup->IsProperty() && !object->IsJSContextExtensionObject()) {
bool done = false;
- MaybeObject* result_object = self->SetPropertyViaPrototypes(
- *name, *value, attributes, strict_mode, &done);
+ Handle<Object> result_object = SetPropertyViaPrototypes(
+ object, name, value, attributes, strict_mode, &done);
if (done) return result_object;
}
if (!lookup->IsFound()) {
// Neither properties nor transitions found.
- return self->AddProperty(
- *name, *value, attributes, strict_mode, store_mode);
+ return AddProperty(
+ object, name, value, attributes, strict_mode, store_mode);
}
if (lookup->IsProperty() && lookup->IsReadOnly()) {
if (strict_mode == kStrictMode) {
- Handle<Object> args[] = { name, self };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
+ Handle<Object> args[] = { name, object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
} else {
- return *value;
+ return value;
}
}
- Handle<Object> old_value(heap->the_hole_value(), isolate);
- if (FLAG_harmony_observation &&
- map()->is_observed() && lookup->IsDataProperty()) {
- old_value = Object::GetProperty(self, name);
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
+ if (is_observed && lookup->IsDataProperty()) {
+ old_value = Object::GetProperty(object, name);
}
// This is a real property that is not read-only, or it is a
// transition or null descriptor and there are no setters in the prototypes.
- MaybeObject* result = *value;
+ Handle<Object> result = value;
switch (lookup->type()) {
case NORMAL:
- result = lookup->holder()->SetNormalizedProperty(lookup, *value);
+ SetNormalizedProperty(handle(lookup->holder()), lookup, value);
break;
case FIELD:
- result = SetPropertyToField(lookup, name, value);
+ SetPropertyToField(lookup, name, value);
break;
case CONSTANT:
// Only replace the constant if necessary.
- if (*value == lookup->GetConstant()) return *value;
- result = SetPropertyToField(lookup, name, value);
+ if (*value == lookup->GetConstant()) return value;
+ SetPropertyToField(lookup, name, value);
break;
case CALLBACKS: {
- Object* callback_object = lookup->GetCallbackObject();
- return self->SetPropertyWithCallback(
- callback_object, *name, *value, lookup->holder(), strict_mode);
+ Handle<Object> callback_object(lookup->GetCallbackObject(), isolate);
+ return SetPropertyWithCallback(object, callback_object, name, value,
+ handle(lookup->holder()), strict_mode);
}
case INTERCEPTOR:
- result = lookup->holder()->SetPropertyWithInterceptor(
- *name, *value, attributes, strict_mode);
+ result = SetPropertyWithInterceptor(handle(lookup->holder()), name, value,
+ attributes, strict_mode);
break;
- case TRANSITION: {
- result = SetPropertyUsingTransition(lookup, name, value, attributes);
+ case TRANSITION:
+ result = SetPropertyUsingTransition(handle(lookup->holder()), lookup,
+ name, value, attributes);
break;
- }
case HANDLER:
case NONEXISTENT:
UNREACHABLE();
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
- if (FLAG_harmony_observation && self->map()->is_observed()) {
+ if (is_observed) {
if (lookup->IsTransition()) {
- EnqueueChangeRecord(self, "new", name, old_value);
+ EnqueueChangeRecord(object, "new", name, old_value);
} else {
LookupResult new_lookup(isolate);
- self->LocalLookup(*name, &new_lookup, true);
+ object->LocalLookup(*name, &new_lookup, true);
if (new_lookup.IsDataProperty()) {
- Handle<Object> new_value = Object::GetProperty(self, name);
+ Handle<Object> new_value = Object::GetProperty(object, name);
if (!new_value->SameValue(*old_value)) {
- EnqueueChangeRecord(self, "updated", name, old_value);
+ EnqueueChangeRecord(object, "updated", name, old_value);
}
}
}
}
- return *hresult;
+ return result;
}
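// Summary of the observation protocol this function now implements (the
// Object.observe plumbing behind FLAG_harmony_observation):
//   before: old_value = the current data value, if the object is observed;
//   mutate: via the LookupResult-specific branch of the switch;
//   after:  a transition enqueues a "new" change record, while a data write
//           enqueues "updated" only if !new_value->SameValue(*old_value).
// The added hidden_string() guard means writes to the hidden-properties
// slot are never reported to observers.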
@@ -4063,142 +4145,116 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributesTrampoline(
// doesn't handle function prototypes correctly.
Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
- Handle<Name> key,
+ Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
ValueType value_type,
StoreMode mode,
ExtensibilityCheck extensibility_check) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetLocalPropertyIgnoreAttributes(
- *key, *value, attributes, value_type, mode, extensibility_check),
- Object);
-}
-
+ Isolate* isolate = object->GetIsolate();
-MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
- Name* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- ValueType value_type,
- StoreMode mode,
- ExtensibilityCheck extensibility_check) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
- Isolate* isolate = GetIsolate();
+ AssertNoContextChange ncc(isolate);
+
LookupResult lookup(isolate);
- LocalLookup(name_raw, &lookup, true);
- if (!lookup.IsFound()) map()->LookupTransition(this, name_raw, &lookup);
+ object->LocalLookup(*name, &lookup, true);
+ if (!lookup.IsFound()) {
+ object->map()->LookupTransition(*object, *name, &lookup);
+ }
+
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(&lookup,
- name_raw,
- value_raw,
- false,
- kNonStrictMode);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(object, &lookup, name, value,
+ false, kNonStrictMode);
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes(
- name_raw,
- value_raw,
- attributes,
- value_type,
- mode,
- extensibility_check);
+ return SetLocalPropertyIgnoreAttributes(Handle<JSObject>::cast(proto),
+ name, value, attributes, value_type, mode, extensibility_check);
}
if (lookup.IsFound() &&
(lookup.type() == INTERCEPTOR || lookup.type() == CALLBACKS)) {
- LocalLookupRealNamedProperty(name_raw, &lookup);
+ object->LocalLookupRealNamedProperty(*name, &lookup);
}
// The accessor-in-prototype-chain check done by SetPropertyForResult is
// intentionally omitted in this clone.
if (!lookup.IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(
- name_raw, value_raw, attributes, kNonStrictMode,
+ return AddProperty(object, name, value, attributes, kNonStrictMode,
MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode);
}
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<Name> name(name_raw);
- Handle<Object> value(value_raw, isolate);
-
- Handle<Object> old_value(isolate->heap()->the_hole_value(), isolate);
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
PropertyAttributes old_attributes = ABSENT;
- bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
if (is_observed && lookup.IsProperty()) {
if (lookup.IsDataProperty()) old_value =
- Object::GetProperty(self, name);
+ Object::GetProperty(object, name);
old_attributes = lookup.GetAttributes();
}
// The IsReadOnly check is likewise intentionally omitted in this clone.
- MaybeObject* result = *value;
switch (lookup.type()) {
case NORMAL:
- result = self->ReplaceSlowProperty(*name, *value, attributes);
+ ReplaceSlowProperty(object, name, value, attributes);
break;
case FIELD:
- result = SetPropertyToFieldWithAttributes(
- &lookup, name, value, attributes);
+ SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
break;
case CONSTANT:
// Only replace the constant if necessary.
if (lookup.GetAttributes() != attributes ||
*value != lookup.GetConstant()) {
- result = SetPropertyToFieldWithAttributes(
- &lookup, name, value, attributes);
+ SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
}
break;
case CALLBACKS:
- result = ConvertAndSetLocalProperty(&lookup, *name, *value, attributes);
+ ConvertAndSetLocalProperty(&lookup, name, value, attributes);
break;
- case TRANSITION:
- result = SetPropertyUsingTransition(&lookup, name, value, attributes);
+ case TRANSITION: {
+ Handle<Object> result = SetPropertyUsingTransition(
+ handle(lookup.holder()), &lookup, name, value, attributes);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
break;
+ }
case NONEXISTENT:
case HANDLER:
case INTERCEPTOR:
UNREACHABLE();
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
if (is_observed) {
if (lookup.IsTransition()) {
- EnqueueChangeRecord(self, "new", name, old_value);
+ EnqueueChangeRecord(object, "new", name, old_value);
} else if (old_value->IsTheHole()) {
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigured", name, old_value);
} else {
LookupResult new_lookup(isolate);
- self->LocalLookup(*name, &new_lookup, true);
+ object->LocalLookup(*name, &new_lookup, true);
bool value_changed = false;
if (new_lookup.IsDataProperty()) {
- Handle<Object> new_value = Object::GetProperty(self, name);
+ Handle<Object> new_value = Object::GetProperty(object, name);
value_changed = !old_value->SameValue(*new_value);
}
if (new_lookup.GetAttributes() != old_attributes) {
if (!value_changed) old_value = isolate->factory()->the_hole_value();
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigured", name, old_value);
} else if (value_changed) {
- EnqueueChangeRecord(self, "updated", name, old_value);
+ EnqueueChangeRecord(object, "updated", name, old_value);
}
}
}
- return *hresult;
+ return value;
}
@@ -4235,7 +4291,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<JSObject> receiver_handle(receiver);
@@ -4370,7 +4426,7 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSReceiver> hreceiver(receiver);
@@ -4422,52 +4478,49 @@ PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
}
-MaybeObject* NormalizedMapCache::Get(JSObject* obj,
- PropertyNormalizationMode mode) {
- Isolate* isolate = obj->GetIsolate();
- Map* fast = obj->map();
- int index = fast->Hash() % kEntries;
- Object* result = get(index);
+Handle<Map> NormalizedMapCache::Get(Handle<NormalizedMapCache> cache,
+ Handle<JSObject> obj,
+ PropertyNormalizationMode mode) {
+ int index = obj->map()->Hash() % kEntries;
+ Handle<Object> result = handle(cache->get(index), cache->GetIsolate());
if (result->IsMap() &&
- Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
+ Handle<Map>::cast(result)->EquivalentToForNormalization(obj->map(),
+ mode)) {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- Map::cast(result)->SharedMapVerify();
+ Handle<Map>::cast(result)->SharedMapVerify();
}
#endif
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// The cached map should match the newly created normalized map bit-by-bit,
// except for the code cache, which can contain ICs (inline caches) that can
// be applied to the shared map.
- Object* fresh;
- MaybeObject* maybe_fresh =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (maybe_fresh->ToObject(&fresh)) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kCodeCacheOffset) == 0);
- STATIC_ASSERT(Map::kDependentCodeOffset ==
- Map::kCodeCacheOffset + kPointerSize);
- int offset = Map::kDependentCodeOffset + kPointerSize;
- ASSERT(memcmp(Map::cast(fresh)->address() + offset,
- Map::cast(result)->address() + offset,
- Map::kSize - offset) == 0);
- }
+ Handle<Map> fresh = Map::CopyNormalized(handle(obj->map()), mode,
+ SHARED_NORMALIZED_MAP);
+
+ ASSERT(memcmp(fresh->address(),
+ Handle<Map>::cast(result)->address(),
+ Map::kCodeCacheOffset) == 0);
+ STATIC_ASSERT(Map::kDependentCodeOffset ==
+ Map::kCodeCacheOffset + kPointerSize);
+ int offset = Map::kDependentCodeOffset + kPointerSize;
+ ASSERT(memcmp(fresh->address() + offset,
+ Handle<Map>::cast(result)->address() + offset,
+ Map::kSize - offset) == 0);
}
#endif
- return result;
+ return Handle<Map>::cast(result);
}
- { MaybeObject* maybe_result =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- ASSERT(Map::cast(result)->is_dictionary_map());
- set(index, result);
+ Isolate* isolate = cache->GetIsolate();
+ Handle<Map> map = Map::CopyNormalized(handle(obj->map()), mode,
+ SHARED_NORMALIZED_MAP);
+ ASSERT(map->is_dictionary_map());
+ cache->set(index, *map);
isolate->counters()->normalized_maps()->Increment();
- return result;
+ return map;
}
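// The cache is a FixedArray used as a direct-mapped table: one entry per
// bucket, no chaining, stale entries silently overwritten, and lookup uses
// an equivalence predicate (EquivalentToForNormalization) rather than exact
// equality. A self-contained toy with the same shape, in plain C++ (an
// illustration, not V8 code; it uses exact key equality for simplicity):
#include <array>
#include <cstddef>
#include <functional>
#include <optional>
#include <utility>

template <typename K, typename V, size_t kEntries>
class DirectMappedCache {
 public:
  // Returns the cached value for key, or computes, stores and returns one.
  V GetOrCreate(const K& key, const std::function<V(const K&)>& create) {
    size_t index = std::hash<K>()(key) % kEntries;  // Hash() % kEntries
    if (entries_[index] && entries_[index]->first == key) {
      return entries_[index]->second;               // cache hit
    }
    V value = create(key);                          // CopyNormalized analogue
    entries_[index] = std::make_pair(key, value);   // overwrite the bucket
    return value;
  }

 private:
  std::array<std::optional<std::pair<K, V>>, kEntries> entries_;
};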
@@ -4483,16 +4536,6 @@ void HeapObject::UpdateMapCodeCache(Handle<HeapObject> object,
Handle<Name> name,
Handle<Code> code) {
Handle<Map> map(object->map());
- if (map->is_shared()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- // Fast case maps are never marked as shared.
- ASSERT(!receiver->HasFastProperties());
- // Replace the map with an identical copy that can be safely modified.
- map = Map::CopyNormalized(map, KEEP_INOBJECT_PROPERTIES,
- UNIQUE_NORMALIZED_MAP);
- receiver->GetIsolate()->counters()->normalized_maps()->Increment();
- receiver->set_map(*map);
- }
Map::UpdateCodeCache(map, name, code);
}
@@ -4500,65 +4543,55 @@ void HeapObject::UpdateMapCodeCache(Handle<HeapObject> object,
void JSObject::NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->NormalizeProperties(
- mode, expected_additional_properties));
-}
-
-
-MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
- int expected_additional_properties) {
- if (!HasFastProperties()) return this;
+ if (!object->HasFastProperties()) return;
// The global object is always normalized.
- ASSERT(!IsGlobalObject());
+ ASSERT(!object->IsGlobalObject());
// JSGlobalProxy must never be normalized
- ASSERT(!IsJSGlobalProxy());
+ ASSERT(!object->IsJSGlobalProxy());
- Map* map_of_this = map();
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Map> map(object->map());
// Allocate new content.
- int real_size = map_of_this->NumberOfOwnDescriptors();
+ int real_size = map->NumberOfOwnDescriptors();
int property_count = real_size;
if (expected_additional_properties > 0) {
property_count += expected_additional_properties;
} else {
property_count += 2; // Make space for two more properties.
}
- NameDictionary* dictionary;
- MaybeObject* maybe_dictionary =
- NameDictionary::Allocate(GetHeap(), property_count);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ Handle<NameDictionary> dictionary =
+ isolate->factory()->NewNameDictionary(property_count);
- DescriptorArray* descs = map_of_this->instance_descriptors();
+ Handle<DescriptorArray> descs(map->instance_descriptors());
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case CONSTANT: {
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(descs->GetConstant(i), isolate);
PropertyDetails d = PropertyDetails(
details.attributes(), NORMAL, i + 1);
- Object* value = descs->GetConstant(i);
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case FIELD: {
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(
+ object->RawFastPropertyAt(descs->GetFieldIndex(i)), isolate);
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, i + 1);
- Object* value = RawFastPropertyAt(descs->GetFieldIndex(i));
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case CALLBACKS: {
- Object* value = descs->GetCallbacksObject(i);
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(descs->GetCallbacksObject(i), isolate);
PropertyDetails d = PropertyDetails(
details.attributes(), CALLBACKS, i + 1);
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case INTERCEPTOR:
@@ -4572,62 +4605,52 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
}
}
- Heap* current_heap = GetHeap();
-
// Copy the next enumeration index from instance descriptor.
dictionary->SetNextEnumerationIndex(real_size + 1);
- Map* new_map;
- MaybeObject* maybe_map =
- current_heap->isolate()->context()->native_context()->
- normalized_map_cache()->Get(this, mode);
- if (!maybe_map->To(&new_map)) return maybe_map;
+ Handle<NormalizedMapCache> cache(
+ isolate->context()->native_context()->normalized_map_cache());
+ Handle<Map> new_map = NormalizedMapCache::Get(cache, object, mode);
ASSERT(new_map->is_dictionary_map());
- // We have now successfully allocated all the necessary objects.
- // Changes can now be made with the guarantee that all of them take effect.
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
// Resize the object in the heap if necessary.
int new_instance_size = new_map->instance_size();
- int instance_size_delta = map_of_this->instance_size() - new_instance_size;
+ int instance_size_delta = map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
- instance_size_delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+ isolate->heap()->CreateFillerObjectAt(object->address() + new_instance_size,
+ instance_size_delta);
+ if (Marking::IsBlack(Marking::MarkBitFrom(*object))) {
+ MemoryChunk::IncrementLiveBytesFromMutator(object->address(),
                                             -instance_size_delta);
}
- set_map(new_map);
- map_of_this->NotifyLeafMapLayoutChange();
+ object->set_map(*new_map);
+ map->NotifyLeafMapLayoutChange();
- set_properties(dictionary);
+ object->set_properties(*dictionary);
- current_heap->isolate()->counters()->props_to_dictionary()->Increment();
+ isolate->counters()->props_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
PrintF("Object properties have been normalized:\n");
- Print();
+ object->Print();
}
#endif
- return this;
}
void JSObject::TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields) {
+ if (object->HasFastProperties()) return;
+ ASSERT(!object->IsGlobalObject());
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
- object->TransformToFastProperties(unused_property_fields));
-}
-
-
-MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) {
- if (HasFastProperties()) return this;
- ASSERT(!IsGlobalObject());
- return property_dictionary()->
- TransformPropertiesToFastFor(this, unused_property_fields);
+ object->property_dictionary()->TransformPropertiesToFastFor(
+ *object, unused_property_fields));
}
@@ -4667,6 +4690,18 @@ static MUST_USE_RESULT MaybeObject* CopyFastElementsToDictionary(
}
+static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
+ Handle<FixedArrayBase> array,
+ int length,
+ Handle<SeededNumberDictionary> dict) {
+ Isolate* isolate = array->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ CopyFastElementsToDictionary(
+ isolate, *array, length, *dict),
+ SeededNumberDictionary);
+}
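// These CALL_HEAP_FUNCTION shims are the transitional glue of the patch.
// Paraphrasing what the macro family does in this V8 vintage (simplified,
// not a verbatim quote of the macro):
//   1) evaluate the raw MaybeObject* expression;
//   2) on an allocation failure marked retry-after-GC, collect garbage and
//      evaluate the expression again;
//   3) on success, wrap the result in a Handle<TYPE>;
//   4) on a thrown exception, yield an empty handle.
// Centralizing the retry logic is what lets the handlified callers above
// drop their per-call IsFailure()/To() plumbing.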
+
+
Handle<SeededNumberDictionary> JSObject::NormalizeElements(
Handle<JSObject> object) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
@@ -5089,7 +5124,7 @@ Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
if (interceptor->deleter()->IsUndefined()) return factory->false_value();
@@ -5152,7 +5187,7 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
Handle<Object> old_value;
bool should_enqueue_change_record = false;
if (FLAG_harmony_observation && object->map()->is_observed()) {
- should_enqueue_change_record = object->HasLocalElement(index);
+ should_enqueue_change_record = HasLocalElement(object, index);
if (should_enqueue_change_record) {
old_value = object->GetLocalElementAccessorPair(index) != NULL
? Handle<Object>::cast(factory->the_hole_value())
@@ -5168,7 +5203,7 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
result = AccessorDelete(object, index, mode);
}
- if (should_enqueue_change_record && !object->HasLocalElement(index)) {
+ if (should_enqueue_change_record && !HasLocalElement(object, index)) {
Handle<String> name = factory->Uint32ToString(index);
EnqueueChangeRecord(object, "deleted", name, old_value);
}
@@ -5222,7 +5257,9 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation && object->map()->is_observed();
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
if (is_observed && lookup.IsDataProperty()) {
old_value = Object::GetProperty(object, name);
}
@@ -5243,7 +5280,7 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
result = DeleteNormalizedProperty(object, name, mode);
}
- if (is_observed && !object->HasLocalProperty(*name)) {
+ if (is_observed && !HasLocalProperty(object, name)) {
EnqueueChangeRecord(object, "deleted", name, old_value);
}
@@ -5405,59 +5442,50 @@ bool JSObject::ReferencesObject(Object* obj) {
Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
-}
-
-
-MaybeObject* JSObject::PreventExtensions() {
- Isolate* isolate = GetIsolate();
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this,
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object,
isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->false_value();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return object;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->PreventExtensions();
+ return PreventExtensions(Handle<JSObject>::cast(proto));
}
// It's not possible to seal objects with external array elements
- if (HasExternalArrayElements()) {
- HandleScope scope(isolate);
- Handle<Object> object(this, isolate);
+ if (object->HasExternalArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
HandleVector(&object, 1));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// If there are fast elements we normalize.
- SeededNumberDictionary* dictionary = NULL;
- { MaybeObject* maybe = NormalizeElements();
- if (!maybe->To<SeededNumberDictionary>(&dictionary)) return maybe;
- }
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
+
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
// Do a map transition, other objects with this map may still
// be extensible.
// TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
- Map* new_map;
- MaybeObject* maybe = map()->Copy();
- if (!maybe->To(&new_map)) return maybe;
+ Handle<Map> new_map = Map::Copy(handle(object->map()));
new_map->set_is_extensible(false);
- set_map(new_map);
- ASSERT(!map()->is_extensible());
- return new_map;
+ object->set_map(*new_map);
+ ASSERT(!object->map()->is_extensible());
+ return object;
}
@@ -5482,223 +5510,318 @@ static void FreezeDictionary(Dictionary* dictionary) {
}
-MUST_USE_RESULT MaybeObject* JSObject::Freeze(Isolate* isolate) {
+Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
// Freezing non-strict arguments should be handled elsewhere.
- ASSERT(!HasNonStrictArgumentsElements());
-
- Heap* heap = isolate->heap();
+ ASSERT(!object->HasNonStrictArgumentsElements());
- if (map()->is_frozen()) return this;
+ if (object->map()->is_frozen()) return object;
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this,
- heap->undefined_value(),
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object,
+ isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return heap->false_value();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return object;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->Freeze(isolate);
+ return Freeze(Handle<JSObject>::cast(proto));
}
// It's not possible to freeze objects with external array elements
- if (HasExternalArrayElements()) {
- HandleScope scope(isolate);
- Handle<Object> object(this, isolate);
+ if (object->HasExternalArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
HandleVector(&object, 1));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- SeededNumberDictionary* new_element_dictionary = NULL;
- if (!elements()->IsDictionary()) {
- int length = IsJSArray()
- ? Smi::cast(JSArray::cast(this)->length())->value()
- : elements()->length();
+ Handle<SeededNumberDictionary> new_element_dictionary;
+ if (!object->elements()->IsDictionary()) {
+ int length = object->IsJSArray()
+ ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
+ : object->elements()->length();
if (length > 0) {
int capacity = 0;
int used = 0;
- GetElementsCapacityAndUsage(&capacity, &used);
- MaybeObject* maybe_dict = SeededNumberDictionary::Allocate(heap, used);
- if (!maybe_dict->To(&new_element_dictionary)) return maybe_dict;
+ object->GetElementsCapacityAndUsage(&capacity, &used);
+ new_element_dictionary =
+ isolate->factory()->NewSeededNumberDictionary(used);
// Move elements to a dictionary; avoid calling NormalizeElements so we do
// not trigger unnecessary transitions.
- maybe_dict = CopyFastElementsToDictionary(isolate, elements(), length,
- new_element_dictionary);
- if (!maybe_dict->To(&new_element_dictionary)) return maybe_dict;
+ new_element_dictionary = CopyFastElementsToDictionary(
+ handle(object->elements()), length, new_element_dictionary);
} else {
// No existing elements; use a pre-allocated empty backing store.
- new_element_dictionary = heap->empty_slow_element_dictionary();
+ new_element_dictionary =
+ isolate->factory()->empty_slow_element_dictionary();
}
}
LookupResult result(isolate);
- map()->LookupTransition(this, heap->frozen_symbol(), &result);
+ Handle<Map> old_map(object->map());
+ old_map->LookupTransition(*object, isolate->heap()->frozen_symbol(), &result);
if (result.IsTransition()) {
Map* transition_map = result.GetTransitionTarget();
ASSERT(transition_map->has_dictionary_elements());
ASSERT(transition_map->is_frozen());
ASSERT(!transition_map->is_extensible());
- set_map(transition_map);
- } else if (HasFastProperties() && map()->CanHaveMoreTransitions()) {
+ object->set_map(transition_map);
+ } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
// Create a new descriptor array with fully-frozen properties
- int num_descriptors = map()->NumberOfOwnDescriptors();
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors =
- map()->instance_descriptors()->CopyUpToAddAttributes(num_descriptors,
- FROZEN);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
-
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyReplaceDescriptors(
- new_descriptors, INSERT_TRANSITION, heap->frozen_symbol());
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ int num_descriptors = old_map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::CopyUpToAddAttributes(
+ handle(old_map->instance_descriptors()), num_descriptors, FROZEN);
+ Handle<Map> new_map = Map::CopyReplaceDescriptors(
+ old_map, new_descriptors, INSERT_TRANSITION,
+ isolate->factory()->frozen_symbol());
new_map->freeze();
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- set_map(new_map);
+ object->set_map(*new_map);
} else {
// Slow path: need to normalize properties for safety
- MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe->IsFailure()) return maybe;
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
// Create a new map, since other objects with this map may be extensible.
// TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
- Map* new_map;
- MaybeObject* maybe_copy = map()->Copy();
- if (!maybe_copy->To(&new_map)) return maybe_copy;
+ Handle<Map> new_map = Map::Copy(handle(object->map()));
new_map->freeze();
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- set_map(new_map);
+ object->set_map(*new_map);
// Freeze dictionary-mode properties
- FreezeDictionary(property_dictionary());
+ FreezeDictionary(object->property_dictionary());
}
- ASSERT(map()->has_dictionary_elements());
- if (new_element_dictionary != NULL) {
- set_elements(new_element_dictionary);
+ ASSERT(object->map()->has_dictionary_elements());
+ if (!new_element_dictionary.is_null()) {
+ object->set_elements(*new_element_dictionary);
}
- if (elements() != heap->empty_slow_element_dictionary()) {
- SeededNumberDictionary* dictionary = element_dictionary();
+ if (object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
+ SeededNumberDictionary* dictionary = object->element_dictionary();
// Make sure we never go back to the fast case
dictionary->set_requires_slow_elements();
// Freeze all elements in the dictionary
FreezeDictionary(dictionary);
}
- return this;
+ return object;
}
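// Freeze now picks one of three map strategies, matching the branches above:
//   1) a frozen_symbol transition already exists -> reuse that map;
//   2) fast properties and CanHaveMoreTransitions() -> copy the descriptors
//      with the FROZEN attributes added and insert a new transition;
//   3) otherwise -> normalize to dictionary properties and freeze each
//      dictionary entry in place.
// Every path ends on a non-extensible map with DICTIONARY_ELEMENTS, and a
// non-empty element dictionary is additionally marked
// requires_slow_elements() so the object can never return to fast elements.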
-MUST_USE_RESULT MaybeObject* JSObject::SetObserved(Isolate* isolate) {
- if (map()->is_observed())
- return isolate->heap()->undefined_value();
+void JSObject::SetObserved(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
- Heap* heap = isolate->heap();
+ if (object->map()->is_observed())
+ return;
- if (!HasExternalArrayElements()) {
+ if (!object->HasExternalArrayElements()) {
// Go to dictionary mode, so that we don't skip map checks.
- MaybeObject* maybe = NormalizeElements();
- if (maybe->IsFailure()) return maybe;
- ASSERT(!HasFastElements());
+ NormalizeElements(object);
+ ASSERT(!object->HasFastElements());
}
LookupResult result(isolate);
- map()->LookupTransition(this, heap->observed_symbol(), &result);
+ object->map()->LookupTransition(*object,
+ isolate->heap()->observed_symbol(),
+ &result);
- Map* new_map;
+ Handle<Map> new_map;
if (result.IsTransition()) {
- new_map = result.GetTransitionTarget();
+ new_map = handle(result.GetTransitionTarget());
ASSERT(new_map->is_observed());
- } else if (map()->CanHaveMoreTransitions()) {
- MaybeObject* maybe_new_map = map()->CopyForObserved();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ } else if (object->map()->CanHaveMoreTransitions()) {
+ new_map = Map::CopyForObserved(handle(object->map()));
} else {
- MaybeObject* maybe_copy = map()->Copy();
- if (!maybe_copy->To(&new_map)) return maybe_copy;
+ new_map = Map::Copy(handle(object->map()));
new_map->set_is_observed(true);
}
- set_map(new_map);
+ object->set_map(*new_map);
+}
- return heap->undefined_value();
+
+Handle<JSObject> JSObject::Copy(Handle<JSObject> object,
+ Handle<AllocationSite> site) {
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ isolate->heap()->CopyJSObject(*object, *site), JSObject);
}
-MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) return isolate->StackOverflow();
+Handle<JSObject> JSObject::Copy(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ isolate->heap()->CopyJSObject(*object), JSObject);
+}
- if (map()->is_deprecated()) {
- MaybeObject* maybe_failure = MigrateInstance();
- if (maybe_failure->IsFailure()) return maybe_failure;
+
+class JSObjectWalkVisitor {
+ public:
+ explicit JSObjectWalkVisitor(AllocationSiteContext* site_context) :
+ site_context_(site_context) {}
+ virtual ~JSObjectWalkVisitor() {}
+
+ Handle<JSObject> Visit(Handle<JSObject> object) {
+ return StructureWalk(object);
}
- Heap* heap = isolate->heap();
- Object* result;
- { MaybeObject* maybe_result = heap->CopyJSObject(this);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ virtual bool is_copying() = 0;
+
+ protected:
+ Handle<JSObject> StructureWalk(Handle<JSObject> object);
+
+ // The returned handle will be used for the object in all subsequent usages.
+ // This allows VisitObject to make a copy of the object if desired.
+ virtual Handle<JSObject> VisitObject(Handle<JSObject> object) = 0;
+ virtual Handle<JSObject> VisitElementOrProperty(Handle<JSObject> object,
+ Handle<JSObject> value) = 0;
+
+ AllocationSiteContext* site_context() { return site_context_; }
+
+ private:
+ AllocationSiteContext* site_context_;
+};
+
+
+class JSObjectCopyVisitor: public JSObjectWalkVisitor {
+ public:
+ explicit JSObjectCopyVisitor(AllocationSiteContext* site_context)
+ : JSObjectWalkVisitor(site_context) {}
+
+ virtual bool is_copying() V8_OVERRIDE { return true; }
+
+ // The returned handle will be used for the object in all
+ // subsequent usages. This allows VisitObject to make a copy
+ // of the object if desired.
+ virtual Handle<JSObject> VisitObject(Handle<JSObject> object) V8_OVERRIDE {
+ // Only create a memento if
+ // 1) we have a JSArray,
+ // 2) the elements kind is palatable, and
+ // 3) allow_mementos is true
+ Handle<JSObject> copy;
+ if (site_context()->activated() &&
+ AllocationSite::CanTrack(object->map()->instance_type()) &&
+ AllocationSite::GetMode(object->GetElementsKind()) ==
+ TRACK_ALLOCATION_SITE) {
+ copy = JSObject::Copy(object, site_context()->current());
+ } else {
+ copy = JSObject::Copy(object);
+ }
+
+ return copy;
+ }
+
+ virtual Handle<JSObject> VisitElementOrProperty(
+ Handle<JSObject> object,
+ Handle<JSObject> value) V8_OVERRIDE {
+ Handle<AllocationSite> current_site = site_context()->EnterNewScope();
+ Handle<JSObject> copy_of_value = StructureWalk(value);
+ site_context()->ExitScope(current_site, value);
+ return copy_of_value;
+ }
+};
+
+
+class JSObjectCreateAllocationSitesVisitor: public JSObjectWalkVisitor {
+ public:
+ explicit JSObjectCreateAllocationSitesVisitor(
+ AllocationSiteContext* site_context)
+ : JSObjectWalkVisitor(site_context) {}
+
+ virtual bool is_copying() V8_OVERRIDE { return false; }
+
+ // The returned handle will be used for the object in all
+ // subsequent usages. This allows VisitObject to make a copy
+ // of the object if desired.
+ virtual Handle<JSObject> VisitObject(Handle<JSObject> object) V8_OVERRIDE {
+ return object;
+ }
+
+ virtual Handle<JSObject> VisitElementOrProperty(
+ Handle<JSObject> object,
+ Handle<JSObject> value) V8_OVERRIDE {
+ Handle<AllocationSite> current_site = site_context()->EnterNewScope();
+ value = StructureWalk(value);
+ site_context()->ExitScope(current_site, value);
+ return value;
+ }
+};
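// StructureWalk below is a template method: it drives the recursion over
// properties and elements, while the two visitors above decide, per object,
// whether to copy or merely record allocation sites. A hypothetical third
// visitor, shown only to make the contract concrete (not part of the patch):
class JSObjectNoopWalkVisitor : public JSObjectWalkVisitor {
 public:
  explicit JSObjectNoopWalkVisitor(AllocationSiteContext* site_context)
      : JSObjectWalkVisitor(site_context) {}

  virtual bool is_copying() V8_OVERRIDE { return false; }

  // Identity visit: StructureWalk then asserts copy.is_identical_to(object).
  virtual Handle<JSObject> VisitObject(Handle<JSObject> object) V8_OVERRIDE {
    return object;
  }

  // Plain recursion with no allocation-site scope bookkeeping.
  virtual Handle<JSObject> VisitElementOrProperty(
      Handle<JSObject> object, Handle<JSObject> value) V8_OVERRIDE {
    return StructureWalk(value);
  }
};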
+
+
+Handle<JSObject> JSObjectWalkVisitor::StructureWalk(Handle<JSObject> object) {
+ bool copying = is_copying();
+ Isolate* isolate = object->GetIsolate();
+ StackLimitCheck check(isolate);
+ if (check.HasOverflowed()) {
+ isolate->StackOverflow();
+ return Handle<JSObject>::null();
}
- JSObject* copy = JSObject::cast(result);
+
+ if (object->map()->is_deprecated()) {
+ JSObject::MigrateInstance(object);
+ }
+
+ Handle<JSObject> copy = VisitObject(object);
+ ASSERT(copying || copy.is_identical_to(object));
+
+ HandleScope scope(isolate);
// Deep copy local properties.
if (copy->HasFastProperties()) {
- DescriptorArray* descriptors = copy->map()->instance_descriptors();
+ Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
int limit = copy->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
int index = descriptors->GetFieldIndex(i);
- Object* value = RawFastPropertyAt(index);
+ Handle<Object> value(object->RawFastPropertyAt(index), isolate);
if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- MaybeObject* maybe_copy = js_object->DeepCopy(isolate);
- if (!maybe_copy->To(&value)) return maybe_copy;
+ value = VisitElementOrProperty(copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<JSObject>());
} else {
Representation representation = details.representation();
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(heap, representation);
- if (!maybe_storage->To(&value)) return maybe_storage;
+ value = NewStorageFor(isolate, value, representation);
+ }
+ if (copying) {
+ copy->FastPropertyAtPut(index, *value);
}
- copy->FastPropertyAtPut(index, value);
}
} else {
- { MaybeObject* maybe_result =
- heap->AllocateFixedArray(copy->NumberOfLocalProperties());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- FixedArray* names = FixedArray::cast(result);
- copy->GetLocalPropertyNames(names, 0);
+ Handle<FixedArray> names =
+ isolate->factory()->NewFixedArray(copy->NumberOfLocalProperties());
+ copy->GetLocalPropertyNames(*names, 0);
for (int i = 0; i < names->length(); i++) {
ASSERT(names->get(i)->IsString());
- String* key_string = String::cast(names->get(i));
+ Handle<String> key_string(String::cast(names->get(i)));
PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(key_string);
+ copy->GetLocalPropertyAttribute(*key_string);
// Only deep copy fields from the object literal expression.
// In particular, don't try to copy the length attribute of
// an array.
if (attributes != NONE) continue;
- Object* value =
- copy->GetProperty(key_string, &attributes)->ToObjectUnchecked();
+ Handle<Object> value(
+ copy->GetProperty(*key_string, &attributes)->ToObjectUnchecked(),
+ isolate);
if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- { MaybeObject* maybe_result =
- // Creating object copy for literals. No strict mode needed.
- copy->SetProperty(key_string, result, NONE, kNonStrictMode);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ // Creating object copy for literals. No strict mode needed.
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetProperty(
+ copy, key_string, result, NONE, kNonStrictMode));
}
}
}
@@ -5712,8 +5835,8 @@ MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS: {
- FixedArray* elements = FixedArray::cast(copy->elements());
- if (elements->map() == heap->fixed_cow_array_map()) {
+ Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
+ if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
isolate->counters()->cow_arrays_created_runtime()->Increment();
#ifdef DEBUG
for (int i = 0; i < elements->length(); i++) {
@@ -5722,34 +5845,37 @@ MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
#endif
} else {
for (int i = 0; i < elements->length(); i++) {
- Object* value = elements->get(i);
+ Handle<Object> value(elements->get(i), isolate);
ASSERT(value->IsSmi() ||
value->IsTheHole() ||
(IsFastObjectElementsKind(copy->GetElementsKind())));
if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ elements->set(i, *result);
}
- elements->set(i, result);
}
}
}
break;
}
case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* element_dictionary = copy->element_dictionary();
+ Handle<SeededNumberDictionary> element_dictionary(
+ copy->element_dictionary());
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = element_dictionary->KeyAt(i);
if (element_dictionary->IsKey(k)) {
- Object* value = element_dictionary->ValueAt(i);
+ Handle<Object> value(element_dictionary->ValueAt(i), isolate);
if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ element_dictionary->ValueAtPut(i, *result);
}
- element_dictionary->ValueAtPut(i, result);
}
}
}
@@ -5776,6 +5902,25 @@ MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
}
+Handle<JSObject> JSObject::DeepWalk(Handle<JSObject> object,
+ AllocationSiteContext* site_context) {
+ JSObjectCreateAllocationSitesVisitor v(site_context);
+ Handle<JSObject> result = v.Visit(object);
+ ASSERT(!v.is_copying() &&
+ (result.is_null() || result.is_identical_to(object)));
+ return result;
+}
+
+
+Handle<JSObject> JSObject::DeepCopy(Handle<JSObject> object,
+ AllocationSiteContext* site_context) {
+ JSObjectCopyVisitor v(site_context);
+ Handle<JSObject> copy = v.Visit(object);
+ ASSERT(v.is_copying() && !copy.is_identical_to(object));
+ return copy;
+}
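// The assertions pin down the difference: DeepWalk visits the graph in place
// (the result is the input object, or null on stack overflow), while
// DeepCopy always yields a fresh graph. The intended pairing with the
// allocation-site scopes added by this upgrade looks roughly like this
// (class names taken from the new allocation-site-scopes.h; treat the exact
// call shape as illustrative):
//   AllocationSiteCreationContext creation_context(isolate);
//   JSObject::DeepWalk(boilerplate, &creation_context);    // plant sites
//   ...
//   AllocationSiteUsageContext usage_context(isolate, site, true);
//   Handle<JSObject> copy =
//       JSObject::DeepCopy(boilerplate, &usage_context);   // clone w/ sites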
+
+
// Tests for the fast common case for property enumeration:
// - This object and all prototypes has an enum cache (which means that
// it is no proxy, has no interceptors and needs no access checks).
@@ -6175,7 +6320,7 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
+ AssertNoContextChange ncc(isolate);
// Try to flatten before operating on the string.
if (name->IsString()) String::cast(*name)->TryFlatten();
@@ -6186,11 +6331,13 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
bool is_element = name->AsArrayIndex(&index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation && object->map()->is_observed();
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
bool preexists = false;
if (is_observed) {
if (is_element) {
- preexists = object->HasLocalElement(index);
+ preexists = HasLocalElement(object, index);
if (preexists && object->GetLocalElementAccessorPair(index) == NULL) {
old_value = Object::GetElement(isolate, object, index);
}
@@ -6361,7 +6508,7 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
// Try to flatten before operating on the string.
if (name->IsString()) FlattenString(Handle<String>::cast(name));
@@ -6420,58 +6567,62 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
}
-MaybeObject* JSObject::LookupAccessor(Name* name, AccessorComponent component) {
- Heap* heap = GetHeap();
+Handle<Object> JSObject::GetAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component) {
+ Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
+ AssertNoContextChange ncc(isolate);
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- RETURN_IF_SCHEDULED_EXCEPTION(heap->isolate());
- return heap->undefined_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object, *name, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
// Make the lookup and include prototypes.
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
- for (Object* obj = this;
- obj != heap->null_value();
- obj = JSReceiver::cast(obj)->GetPrototype()) {
- if (obj->IsJSObject() && JSObject::cast(obj)->HasDictionaryElements()) {
- JSObject* js_object = JSObject::cast(obj);
+ for (Handle<Object> obj = object;
+ !obj->IsNull();
+ obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) {
+ if (obj->IsJSObject() && JSObject::cast(*obj)->HasDictionaryElements()) {
+ JSObject* js_object = JSObject::cast(*obj);
SeededNumberDictionary* dictionary = js_object->element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
if (dictionary->DetailsAt(entry).type() == CALLBACKS &&
element->IsAccessorPair()) {
- return AccessorPair::cast(element)->GetComponent(component);
+ return handle(AccessorPair::cast(element)->GetComponent(component),
+ isolate);
}
}
}
}
} else {
- for (Object* obj = this;
- obj != heap->null_value();
- obj = JSReceiver::cast(obj)->GetPrototype()) {
- LookupResult result(heap->isolate());
- JSReceiver::cast(obj)->LocalLookup(name, &result);
+ for (Handle<Object> obj = object;
+ !obj->IsNull();
+ obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) {
+ LookupResult result(isolate);
+ JSReceiver::cast(*obj)->LocalLookup(*name, &result);
if (result.IsFound()) {
- if (result.IsReadOnly()) return heap->undefined_value();
+ if (result.IsReadOnly()) return isolate->factory()->undefined_value();
if (result.IsPropertyCallbacks()) {
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
- return AccessorPair::cast(obj)->GetComponent(component);
+ return handle(AccessorPair::cast(obj)->GetComponent(component),
+ isolate);
}
}
}
}
}
- return heap->undefined_value();
+ return isolate->factory()->undefined_value();
}
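
The handlified GetAccessor above re-reads the prototype through a fresh handle on every loop iteration, so a GC triggered during the lookup can move objects without leaving the walk holding stale raw pointers. A minimal standalone sketch of that walk shape, using std::shared_ptr as a stand-in for V8 handles (Obj, Handle, and FindAccessorHolder are illustrative, not the real types):

#include <iostream>
#include <memory>
#include <string>

struct Obj {
  std::string name;
  std::shared_ptr<Obj> prototype;  // null terminates the chain
  bool has_accessor = false;
};

using Handle = std::shared_ptr<Obj>;

// Returns the first object on the chain (receiver included) defining an
// accessor, or nullptr -- mirroring the fall-through to undefined_value().
Handle FindAccessorHolder(Handle receiver) {
  for (Handle obj = receiver; obj != nullptr; obj = obj->prototype) {
    if (obj->has_accessor) return obj;
  }
  return nullptr;
}

int main() {
  Handle proto = std::make_shared<Obj>(Obj{"proto", nullptr, true});
  Handle receiver = std::make_shared<Obj>(Obj{"receiver", proto, false});
  Handle holder = FindAccessorHolder(receiver);
  std::cout << (holder ? holder->name : "undefined") << "\n";  // "proto"
}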
@@ -6504,6 +6655,14 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
+Handle<Map> Map::RawCopy(Handle<Map> map,
+ int instance_size) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->RawCopy(instance_size),
+ Map);
+}
+
+
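This RawCopy wrapper is an instance of the handlification pattern applied throughout the rest of this patch: a raw MaybeObject*-returning routine is wrapped in a static method that takes and returns handles, with CALL_HEAP_FUNCTION absorbing the failure plumbing. A rough standalone sketch of the shape, with std::optional standing in for MaybeObject* and an exception standing in for the retry-after-GC logic (all names here are illustrative):

#include <new>
#include <optional>

struct Map { int instance_size; };

// Raw-style API: an empty optional plays the role of a Failure return.
std::optional<Map> RawCopyUnsafe(const Map& map, int instance_size) {
  if (instance_size <= 0) return std::nullopt;  // simulated allocation failure
  return Map{instance_size};
}

// Handle-style wrapper: callers always get a usable value or a thrown error;
// the real CALL_HEAP_FUNCTION instead retries the allocation after a GC.
Map RawCopy(const Map& map, int instance_size) {
  if (auto result = RawCopyUnsafe(map, instance_size)) return *result;
  throw std::bad_alloc();
}

int main() {
  Map m{32};
  return RawCopy(m, m.instance_size).instance_size == 32 ? 0 : 1;
}
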
MaybeObject* Map::RawCopy(int instance_size) {
Map* result;
MaybeObject* maybe_result =
@@ -6528,25 +6687,15 @@ MaybeObject* Map::RawCopy(int instance_size) {
Handle<Map> Map::CopyNormalized(Handle<Map> map,
PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing) {
- CALL_HEAP_FUNCTION(map->GetIsolate(),
- map->CopyNormalized(mode, sharing),
- Map);
-}
-
-
-MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
- NormalizedMapSharingMode sharing) {
- int new_instance_size = instance_size();
+ int new_instance_size = map->instance_size();
if (mode == CLEAR_INOBJECT_PROPERTIES) {
- new_instance_size -= inobject_properties() * kPointerSize;
+ new_instance_size -= map->inobject_properties() * kPointerSize;
}
- Map* result;
- MaybeObject* maybe_result = RawCopy(new_instance_size);
- if (!maybe_result->To(&result)) return maybe_result;
+ Handle<Map> result = Map::RawCopy(map, new_instance_size);
if (mode != CLEAR_INOBJECT_PROPERTIES) {
- result->set_inobject_properties(inobject_properties());
+ result->set_inobject_properties(map->inobject_properties());
}
result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
@@ -6660,6 +6809,16 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
}
+Handle<Map> Map::CopyReplaceDescriptors(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ TransitionFlag flag,
+ Handle<Name> name) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->CopyReplaceDescriptors(*descriptors, flag, *name),
+ Map);
+}
+
+
MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
TransitionFlag flag,
Name* name,
@@ -6688,20 +6847,19 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
// Since this method is used to rewrite an existing transition tree, it can
// always insert transitions without checking.
-MaybeObject* Map::CopyInstallDescriptors(int new_descriptor,
- DescriptorArray* descriptors) {
+Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map,
+ int new_descriptor,
+ Handle<DescriptorArray> descriptors) {
ASSERT(descriptors->IsSortedNoDuplicates());
- Map* result;
- MaybeObject* maybe_result = CopyDropDescriptors();
- if (!maybe_result->To(&result)) return maybe_result;
+ Handle<Map> result = Map::CopyDropDescriptors(map);
- result->InitializeDescriptors(descriptors);
+ result->InitializeDescriptors(*descriptors);
result->SetNumberOfOwnDescriptors(new_descriptor + 1);
- int unused_property_fields = this->unused_property_fields();
+ int unused_property_fields = map->unused_property_fields();
if (descriptors->GetDetails(new_descriptor).type() == FIELD) {
- unused_property_fields = this->unused_property_fields() - 1;
+ unused_property_fields = map->unused_property_fields() - 1;
if (unused_property_fields < 0) {
unused_property_fields += JSObject::kFieldsAdded;
}
@@ -6710,14 +6868,12 @@ MaybeObject* Map::CopyInstallDescriptors(int new_descriptor,
result->set_unused_property_fields(unused_property_fields);
result->set_owns_descriptors(false);
- Name* name = descriptors->GetKey(new_descriptor);
- TransitionArray* transitions;
- MaybeObject* maybe_transitions =
- AddTransition(name, result, SIMPLE_TRANSITION);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+ Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
+ Handle<TransitionArray> transitions = Map::AddTransition(map, name, result,
+ SIMPLE_TRANSITION);
- set_transitions(transitions);
- result->SetBackPointer(this);
+ map->set_transitions(*transitions);
+ result->SetBackPointer(*map);
return result;
}
@@ -6775,35 +6931,34 @@ MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) {
}
-MaybeObject* Map::CopyForObserved() {
- ASSERT(!is_observed());
+Handle<Map> Map::CopyForObserved(Handle<Map> map) {
+ ASSERT(!map->is_observed());
+
+ Isolate* isolate = map->GetIsolate();
// In case the map owned its own descriptors, share the descriptors and
// transfer ownership to the new map.
- Map* new_map;
- MaybeObject* maybe_new_map;
- if (owns_descriptors()) {
- maybe_new_map = CopyDropDescriptors();
+ Handle<Map> new_map;
+ if (map->owns_descriptors()) {
+ new_map = Map::CopyDropDescriptors(map);
} else {
- maybe_new_map = Copy();
+ new_map = Map::Copy(map);
}
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- TransitionArray* transitions;
- MaybeObject* maybe_transitions = AddTransition(GetHeap()->observed_symbol(),
- new_map,
- FULL_TRANSITION);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
- set_transitions(transitions);
+ Handle<TransitionArray> transitions =
+ Map::AddTransition(map, isolate->factory()->observed_symbol(), new_map,
+ FULL_TRANSITION);
+
+ map->set_transitions(*transitions);
new_map->set_is_observed(true);
- if (owns_descriptors()) {
- new_map->InitializeDescriptors(instance_descriptors());
- set_owns_descriptors(false);
+ if (map->owns_descriptors()) {
+ new_map->InitializeDescriptors(map->instance_descriptors());
+ map->set_owns_descriptors(false);
}
- new_map->SetBackPointer(this);
+ new_map->SetBackPointer(*map);
return new_map;
}
@@ -6904,6 +7059,16 @@ MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor,
}
+Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
+ Handle<DescriptorArray> desc,
+ int enumeration_index,
+ PropertyAttributes attributes) {
+ CALL_HEAP_FUNCTION(desc->GetIsolate(),
+ desc->CopyUpToAddAttributes(enumeration_index, attributes),
+ DescriptorArray);
+}
+
+
MaybeObject* DescriptorArray::CopyUpToAddAttributes(
int enumeration_index, PropertyAttributes attributes) {
if (enumeration_index == 0) return GetHeap()->empty_descriptor_array();
@@ -6992,8 +7157,6 @@ void Map::UpdateCodeCache(Handle<Map> map,
MaybeObject* Map::UpdateCodeCache(Name* name, Code* code) {
- ASSERT(!is_shared() || code->allowed_in_shared_map_code_cache());
-
// Allocate the code cache if not present.
if (code_cache()->IsFixedArray()) {
Object* result;
@@ -7320,11 +7483,10 @@ MaybeObject* CodeCache::UpdateNormalTypeCache(Name* name, Code* code) {
Object* CodeCache::Lookup(Name* name, Code::Flags flags) {
- if (Code::ExtractTypeFromFlags(flags) == Code::NORMAL) {
- return LookupNormalTypeCache(name, flags);
- } else {
- return LookupDefaultCache(name, flags);
- }
+ flags = Code::RemoveTypeFromFlags(flags);
+ Object* result = LookupDefaultCache(name, flags);
+ if (result->IsCode()) return result;
+ return LookupNormalTypeCache(name, flags);
}
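
The reordered lookup above first normalizes the key by stripping the type bits from the flags, probes the default cache, and only falls back to the normal-type cache on a miss. A simplified sketch of that two-stage probe, with std::unordered_map standing in for the fixed-array caches and a made-up Flags encoding:

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

using Flags = uint32_t;
constexpr Flags kTypeMask = 0xF0;  // illustrative position of the type bits

Flags RemoveTypeFromFlags(Flags f) { return f & ~kTypeMask; }

struct CodeCache {
  std::unordered_map<Flags, std::string> default_cache;
  std::unordered_map<Flags, std::string> normal_type_cache;

  // Returns the cached stub name, or "" on a complete miss.
  std::string Lookup(Flags flags) const {
    flags = RemoveTypeFromFlags(flags);
    if (auto it = default_cache.find(flags); it != default_cache.end())
      return it->second;
    if (auto it = normal_type_cache.find(flags); it != normal_type_cache.end())
      return it->second;
    return "";
  }
};

int main() {
  CodeCache cache;
  cache.normal_type_cache[RemoveTypeFromFlags(0x12)] = "normal-stub";
  std::cout << cache.Lookup(0x12) << "\n";  // falls through: "normal-stub"
}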
@@ -7338,7 +7500,7 @@ Object* CodeCache::LookupDefaultCache(Name* name, Code::Flags flags) {
if (key->IsUndefined()) return key;
if (name->Equals(Name::cast(key))) {
Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset));
- if (code->flags() == flags) {
+ if (Code::RemoveTypeFromFlags(code->flags()) == flags) {
return code;
}
}
@@ -7402,9 +7564,7 @@ class CodeCacheHashTableKey : public HashTableKey {
: name_(name), flags_(flags), code_(NULL) { }
CodeCacheHashTableKey(Name* name, Code* code)
- : name_(name),
- flags_(code->flags()),
- code_(code) { }
+ : name_(name), flags_(code->flags()), code_(code) { }
bool IsMatch(Object* other) {
@@ -7676,7 +7836,7 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
accessor->AddElementsToFixedArray(array, array, this);
FixedArray* result;
if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
@@ -7694,7 +7854,7 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
accessor->AddElementsToFixedArray(NULL, NULL, this, other);
FixedArray* result;
if (!maybe_result->To(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
@@ -7706,11 +7866,11 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
}
-MaybeObject* FixedArray::CopySize(int new_length) {
+MaybeObject* FixedArray::CopySize(int new_length, PretenureFlag pretenure) {
Heap* heap = GetHeap();
if (new_length == 0) return heap->empty_fixed_array();
Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length, pretenure);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* result = FixedArray::cast(obj);
@@ -7798,6 +7958,20 @@ void DescriptorArray::CopyFrom(int dst_index,
}
+Handle<DescriptorArray> DescriptorArray::Merge(Handle<DescriptorArray> desc,
+ int verbatim,
+ int valid,
+ int new_size,
+ int modify_index,
+ StoreMode store_mode,
+ Handle<DescriptorArray> other) {
+ CALL_HEAP_FUNCTION(desc->GetIsolate(),
+ desc->Merge(verbatim, valid, new_size, modify_index,
+ store_mode, *other),
+ DescriptorArray);
+}
+
+
// Generalize the |other| descriptor array by merging it into the (at least
// partly) updated |this| descriptor array.
// The method merges two descriptor arrays in three parts. Both descriptor arrays
@@ -8735,7 +8909,7 @@ bool String::SlowEquals(String* other) {
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
if (HasHashCode() && other->HasHashCode()) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
if (Hash() != other->Hash()) {
bool found_difference = false;
@@ -8990,7 +9164,7 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
if (newspace->Contains(start_of_string) &&
newspace->top() == start_of_string + old_size) {
// Last allocated object in new space. Simply lower allocation top.
- *(newspace->allocation_top_address()) = start_of_string + new_size;
+ newspace->set_top(start_of_string + new_size);
} else {
// Sizes are pointer size aligned, so that we can use filler objects
// that are a multiple of pointer size.
@@ -9006,17 +9180,22 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
}
-AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object) {
+AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object,
+ bool in_GC) {
// Currently, AllocationMemento objects are only allocated immediately
// after JSArrays in NewSpace, and detecting whether a JSArray has one
// involves carefully checking the object immediately after the JSArray
// (if there is one) to see if it's an AllocationMemento.
if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
- ASSERT(object->GetHeap()->InToSpace(object));
Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
object->Size();
- if ((ptr_end + AllocationMemento::kSize) <=
- object->GetHeap()->NewSpaceTop()) {
+ Address top;
+ if (in_GC) {
+ top = object->GetHeap()->new_space()->FromSpacePageHigh();
+ } else {
+ top = object->GetHeap()->NewSpaceTop();
+ }
+ if ((ptr_end + AllocationMemento::kSize) <= top) {
// There is room in newspace for allocation info. Do we have some?
Map** possible_allocation_memento_map =
reinterpret_cast<Map**>(ptr_end);
@@ -9221,6 +9400,7 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
if (number_of_own_descriptors > 0) {
TrimDescriptorArray(heap, this, descriptors, number_of_own_descriptors);
ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ set_owns_descriptors(true);
} else {
ASSERT(descriptors == GetHeap()->empty_descriptor_array());
}
@@ -9277,6 +9457,16 @@ bool Map::EquivalentToForNormalization(Map* other,
}
+void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) {
+ int first_ptr_offset = OffsetOfElementAt(first_ptr_index());
+ int last_ptr_offset =
+ OffsetOfElementAt(first_ptr_index() + count_of_ptr_entries());
+ v->VisitPointers(
+ HeapObject::RawField(this, first_ptr_offset),
+ HeapObject::RawField(this, last_ptr_offset));
+}
+
+
void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
// Iterate over all fields in the body but take care in dealing with
// the code entry.
@@ -9722,9 +9912,13 @@ bool JSFunction::PassesFilter(const char* raw_filter) {
String* name = shared()->DebugName();
Vector<const char> filter = CStrVector(raw_filter);
if (filter.length() == 0) return name->length() == 0;
- if (filter[0] != '-' && name->IsUtf8EqualTo(filter)) return true;
- if (filter[0] == '-' &&
- !name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+ if (filter[0] == '-') {
+ if (filter.length() == 1) {
+ return (name->length() != 0);
+ } else if (!name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+ return true;
+ }
+ } else if (name->IsUtf8EqualTo(filter)) {
return true;
}
if (filter[filter.length() - 1] == '*' &&
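
The rewritten filter logic gives '-' on its own a meaning (match any named function) while preserving the earlier behaviors: an empty filter matches only unnamed functions, '-name' negates an exact match, and a trailing '*' prefix-matches. A plain-string sketch of those semantics, covering only the positive-case '*' branch visible in this hunk (the real code works on Vector<const char> and V8 strings):

#include <cassert>
#include <string>

bool PassesFilter(const std::string& name, const std::string& filter) {
  if (filter.empty()) return name.empty();
  if (filter[0] == '-') {
    if (filter.size() == 1) return !name.empty();  // "-" matches any named fn
    return name != filter.substr(1);               // "-foo" excludes "foo"
  }
  if (name == filter) return true;
  if (filter.back() == '*' &&
      name.compare(0, filter.size() - 1, filter, 0, filter.size() - 1) == 0) {
    return true;  // "foo*" prefix-matches "foobar"
  }
  return false;
}

int main() {
  assert(PassesFilter("foo", "foo"));
  assert(PassesFilter("foobar", "foo*"));
  assert(PassesFilter("bar", "-foo"));
  assert(PassesFilter("bar", "-"));
  assert(!PassesFilter("", "-"));
}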
@@ -9768,7 +9962,8 @@ bool SharedFunctionInfo::HasSourceCode() {
Handle<Object> SharedFunctionInfo::GetSourceCode() {
if (!HasSourceCode()) return GetIsolate()->factory()->undefined_value();
Handle<String> source(String::cast(Script::cast(script())->source()));
- return SubString(source, start_position(), end_position());
+ return GetIsolate()->factory()->NewSubString(
+ source, start_position(), end_position());
}
@@ -10128,7 +10323,7 @@ void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
Address* p = rinfo->target_reference_address();
- VisitExternalReferences(p, p + 1);
+ VisitExternalReference(p);
}
@@ -10185,6 +10380,10 @@ void Code::CopyFrom(const CodeDesc& desc) {
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER);
+ } else if (mode == RelocInfo::CODE_AGE_SEQUENCE) {
+ Handle<Object> p = it.rinfo()->code_age_stub_handle(origin);
+ Code* code = Code::cast(*p);
+ it.rinfo()->set_code_age_stub(code);
} else {
it.rinfo()->apply(delta);
}
@@ -10317,31 +10516,35 @@ void Code::ReplaceFirstMap(Map* replace_with) {
}
-Code* Code::FindFirstCode() {
+Code* Code::FindFirstHandler() {
ASSERT(is_inline_cache_stub());
DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
- return Code::GetCodeFromTargetAddress(info->target_address());
+ Code* code = Code::GetCodeFromTargetAddress(info->target_address());
+ if (code->kind() == Code::HANDLER) return code;
}
return NULL;
}
-void Code::FindAllCode(CodeHandleList* code_list, int length) {
+bool Code::FindHandlers(CodeHandleList* code_list, int length) {
ASSERT(is_inline_cache_stub());
DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
int i = 0;
for (RelocIterator it(this, mask); !it.done(); it.next()) {
- if (i++ == length) return;
+ if (i == length) return true;
RelocInfo* info = it.rinfo();
Code* code = Code::GetCodeFromTargetAddress(info->target_address());
- ASSERT(code->kind() == Code::STUB);
+ // IC stubs with handlers never contain non-handler code objects before
+ // handler targets.
+ if (code->kind() != Code::HANDLER) break;
code_list->Add(Handle<Code>(code));
+ i++;
}
- UNREACHABLE();
+ return i == length;
}
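
FindHandlers replaces the old FindAllCode, which asserted that every target was a stub and treated running past length as unreachable; the new contract collects at most length handlers, stops at the first non-handler, and reports whether the expected count was reached. A standalone sketch of that contract over a simplified target list:

#include <cassert>
#include <vector>

enum class Kind { HANDLER, STUB, BUILTIN };

bool FindHandlers(const std::vector<Kind>& targets,
                  std::vector<Kind>* out, int length) {
  int i = 0;
  for (Kind k : targets) {
    if (i == length) return true;        // collected everything we wanted
    if (k != Kind::HANDLER) break;       // non-handlers never precede handlers
    out->push_back(k);
    i++;
  }
  return i == length;                    // found fewer handlers than expected?
}

int main() {
  std::vector<Kind> out;
  assert(FindHandlers({Kind::HANDLER, Kind::HANDLER, Kind::STUB}, &out, 2));
  out.clear();
  assert(!FindHandlers({Kind::HANDLER, Kind::STUB}, &out, 2));
}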
@@ -10409,24 +10612,22 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
DisallowHeapAllocation no_gc;
ASSERT(kind() == FUNCTION);
- for (FullCodeGenerator::BackEdgeTableIterator it(this, &no_gc);
- !it.Done();
- it.Next()) {
- if (it.pc_offset() == pc_offset) return it.ast_id();
+ BackEdgeTable back_edges(this, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (back_edges.pc_offset(i) == pc_offset) return back_edges.ast_id(i);
}
return BailoutId::None();
}
-bool Code::allowed_in_shared_map_code_cache() {
- return is_keyed_load_stub() || is_keyed_store_stub() ||
- (is_compare_ic_stub() &&
- ICCompareStub::CompareState(stub_info()) == CompareIC::KNOWN_OBJECT);
+void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
+ PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge, NO_MARKING_PARITY);
}
-void Code::MakeCodeAgeSequenceYoung(byte* sequence) {
- PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY);
+void Code::MarkCodeAsExecuted(byte* sequence, Isolate* isolate) {
+ PatchPlatformCodeAge(isolate, sequence, kExecutedOnceCodeAge,
+ NO_MARKING_PARITY);
}
@@ -10437,7 +10638,9 @@ void Code::MakeOlder(MarkingParity current_parity) {
MarkingParity code_parity;
GetCodeAgeAndParity(sequence, &age, &code_parity);
if (age != kLastCodeAge && code_parity != current_parity) {
- PatchPlatformCodeAge(sequence, static_cast<Age>(age + 1),
+ PatchPlatformCodeAge(GetIsolate(),
+ sequence,
+ static_cast<Age>(age + 1),
current_parity);
}
}
@@ -10445,18 +10648,14 @@ void Code::MakeOlder(MarkingParity current_parity) {
bool Code::IsOld() {
- byte* sequence = FindCodeAgeSequence();
- if (sequence == NULL) return false;
- Age age;
- MarkingParity parity;
- GetCodeAgeAndParity(sequence, &age, &parity);
- return age >= kSexagenarianCodeAge;
+ Age age = GetAge();
+ return age >= kIsOldCodeAge;
}
byte* Code::FindCodeAgeSequence() {
return FLAG_age_code &&
- prologue_offset() != kPrologueOffsetNotSet &&
+ prologue_offset() != Code::kPrologueOffsetNotSet &&
(kind() == OPTIMIZED_FUNCTION ||
(kind() == FUNCTION && !has_debug_break_slots()))
? instruction_start() + prologue_offset()
@@ -10464,10 +10663,10 @@ byte* Code::FindCodeAgeSequence() {
}
-int Code::GetAge() {
+Code::Age Code::GetAge() {
byte* sequence = FindCodeAgeSequence();
if (sequence == NULL) {
- return Code::kNoAge;
+ return Code::kNoAgeCodeAge;
}
Age age;
MarkingParity parity;
@@ -10496,12 +10695,25 @@ void Code::GetCodeAgeAndParity(Code* code, Age* age,
}
CODE_AGE_LIST(HANDLE_CODE_AGE)
#undef HANDLE_CODE_AGE
+ stub = *builtins->MarkCodeAsExecutedOnce();
+ if (code == stub) {
+ // Treat code that's never been executed as old immediately.
+ *age = kIsOldCodeAge;
+ *parity = NO_MARKING_PARITY;
+ return;
+ }
+ stub = *builtins->MarkCodeAsExecutedTwice();
+ if (code == stub) {
+ // Pre-age code that has only been executed once.
+ *age = kPreAgedCodeAge;
+ *parity = NO_MARKING_PARITY;
+ return;
+ }
UNREACHABLE();
}
-Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
- Isolate* isolate = Isolate::Current();
+Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
Builtins* builtins = isolate->builtins();
switch (age) {
#define HANDLE_CODE_AGE(AGE) \
@@ -10513,6 +10725,14 @@ Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
}
CODE_AGE_LIST(HANDLE_CODE_AGE)
#undef HANDLE_CODE_AGE
+ case kNotExecutedCodeAge: {
+ ASSERT(parity == NO_MARKING_PARITY);
+ return *builtins->MarkCodeAsExecutedOnce();
+ }
+ case kExecutedOnceCodeAge: {
+ ASSERT(parity == NO_MARKING_PARITY);
+ return *builtins->MarkCodeAsExecutedTwice();
+ }
default:
UNREACHABLE();
break;
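
Together, the GetCodeAgeAndParity and GetCodeAgeStub changes extend the age lattice with two pre-aging sentinels that map to dedicated mark-as-executed builtins: never-executed code is treated as already old, and once-executed code is pre-aged. A toy model of that lattice and the MakeOlder/IsOld predicates (the enum values are illustrative, not V8's real CODE_AGE_LIST):

#include <iostream>

enum Age {
  kNotExecutedCodeAge = -2,   // sentinel: maps to MarkCodeAsExecutedOnce
  kExecutedOnceCodeAge = -1,  // sentinel: maps to MarkCodeAsExecutedTwice
  kNoAgeCodeAge = 0,
  kQuadragenarianCodeAge,
  kQuinquagenarianCodeAge,
  kSexagenarianCodeAge,
  kLastCodeAge = kSexagenarianCodeAge,
  kPreAgedCodeAge = kSexagenarianCodeAge - 1,
  kIsOldCodeAge = kSexagenarianCodeAge,
};

Age MakeOlder(Age age) {
  // Sentinel ages never advance here; regular ages step toward kLastCodeAge.
  if (age < kNoAgeCodeAge || age == kLastCodeAge) return age;
  return static_cast<Age>(age + 1);
}

bool IsOld(Age age) { return age >= kIsOldCodeAge; }

int main() {
  Age a = kNoAgeCodeAge;
  while (!IsOld(a)) a = MakeOlder(a);
  std::cout << "aged to old after reaching " << a << "\n";
}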
@@ -10772,7 +10992,7 @@ const char* Code::StubType2String(StubType type) {
case CONSTANT: return "CONSTANT";
case CALLBACKS: return "CALLBACKS";
case INTERCEPTOR: return "INTERCEPTOR";
- case MAP_TRANSITION: return "MAP_TRANSITION";
+ case TRANSITION: return "TRANSITION";
case NONEXISTENT: return "NONEXISTENT";
}
UNREACHABLE(); // keep the compiler happy
@@ -10879,15 +11099,15 @@ void Code::Disassemble(const char* name, FILE* out) {
// (due to alignment) the end of the instruction stream.
if (static_cast<int>(offset) < instruction_size()) {
DisallowHeapAllocation no_gc;
- FullCodeGenerator::BackEdgeTableIterator back_edges(this, &no_gc);
+ BackEdgeTable back_edges(this, &no_gc);
- PrintF(out, "Back edges (size = %u)\n", back_edges.table_length());
+ PrintF(out, "Back edges (size = %u)\n", back_edges.length());
PrintF(out, "ast_id pc_offset loop_depth\n");
- for ( ; !back_edges.Done(); back_edges.Next()) {
- PrintF(out, "%6d %9u %10u\n", back_edges.ast_id().ToInt(),
- back_edges.pc_offset(),
- back_edges.loop_depth());
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ PrintF(out, "%6d %9u %10u\n", back_edges.ast_id(i).ToInt(),
+ back_edges.pc_offset(i),
+ back_edges.loop_depth(i));
}
PrintF(out, "\n");
@@ -10958,6 +11178,10 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
}
ValidateElements();
set_map_and_elements(new_map, new_elements);
+
+ // Transition through the allocation site as well if present.
+ maybe_obj = UpdateAllocationSite(new_elements_kind);
+ if (maybe_obj->IsFailure()) return maybe_obj;
} else {
FixedArray* parameter_map = FixedArray::cast(old_elements);
parameter_map->set(1, new_elements);
@@ -10975,6 +11199,22 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
}
+bool Code::IsWeakEmbeddedObject(Kind kind, Object* object) {
+ if (kind != Code::OPTIMIZED_FUNCTION) return false;
+
+ if (object->IsMap()) {
+ return Map::cast(object)->CanTransition() &&
+ FLAG_collect_maps &&
+ FLAG_weak_embedded_maps_in_optimized_code;
+ }
+
+ if (object->IsJSObject()) {
+ return FLAG_weak_embedded_objects_in_optimized_code;
+ }
+
+ return false;
+}
+
MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
int capacity,
int length) {
@@ -11335,7 +11575,7 @@ Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
int capacity = kCodesStartIndex + number_of_entries + 1;
if (capacity > 5) capacity = capacity * 5 / 4;
Handle<DependentCode> new_entries = Handle<DependentCode>::cast(
- factory->CopySizeFixedArray(entries, capacity));
+ factory->CopySizeFixedArray(entries, capacity, TENURED));
// The number of codes can change after GC.
starts.Recompute(*entries);
start = starts.at(group);
@@ -11569,22 +11809,6 @@ MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
}
-PropertyType JSObject::GetLocalPropertyType(Name* name) {
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- return GetLocalElementType(index);
- }
- LookupResult lookup(GetIsolate());
- LocalLookup(name, &lookup, true);
- return lookup.type();
-}
-
-
-PropertyType JSObject::GetLocalElementType(uint32_t index) {
- return GetElementsAccessor()->GetType(this, this, index);
-}
-
-
AccessorPair* JSObject::GetLocalPropertyAccessorPair(Name* name) {
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
@@ -11628,7 +11852,7 @@ MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> this_handle(this);
@@ -11709,18 +11933,17 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
}
-MaybeObject* JSObject::SetElementWithCallback(Object* structure,
- uint32_t index,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
+ Handle<Object> structure,
+ uint32_t index,
+ Handle<Object> value,
+ Handle<JSObject> holder,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -11729,41 +11952,40 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
if (structure->IsExecutableAccessorInfo()) {
// api style callbacks
- Handle<JSObject> self(this);
- Handle<JSObject> holder_handle(JSObject::cast(holder));
- Handle<ExecutableAccessorInfo> data(
- ExecutableAccessorInfo::cast(structure));
+ Handle<ExecutableAccessorInfo> data =
+ Handle<ExecutableAccessorInfo>::cast(structure);
Object* call_obj = data->setter();
v8::AccessorSetterCallback call_fun =
v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key(isolate->factory()->NumberToString(number));
- LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
+ LOG(isolate, ApiNamedPropertyAccess("store", *object, *key));
PropertyCallbackArguments
- args(isolate, data->data(), *self, *holder_handle);
+ args(isolate, data->data(), *object, *holder);
args.Call(call_fun,
v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
+ v8::Utils::ToLocal(value));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
if (structure->IsAccessorPair()) {
- Handle<Object> setter(AccessorPair::cast(structure)->setter(), isolate);
+ Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
if (setter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
+ return SetPropertyWithDefinedSetter(
+ object, Handle<JSReceiver>::cast(setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
}
- Handle<Object> holder_handle(holder, isolate);
Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ Handle<Object> args[2] = { key, holder };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "no_setter_in_callback", HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
@@ -11771,7 +11993,7 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
if (structure->IsDeclaredAccessorInfo()) return value;
UNREACHABLE();
- return NULL;
+ return Handle<Object>();
}
@@ -11968,10 +12190,13 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
+ Handle<Object> element(dictionary->ValueAt(entry), isolate);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
- return SetElementWithCallback(element, index, *value, this, strict_mode);
+ Handle<Object> result = SetElementWithCallback(self, element, index,
+ value, self, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
} else {
dictionary->UpdateMaxNumberKey(index);
// If a value has not been initialized we allow writing to it even if it
@@ -11996,13 +12221,13 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
// Elements of the arguments object in slow mode might be slow aliases.
if (is_arguments && element->IsAliasedArgumentsEntry()) {
- AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(element);
+ AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(*element);
Context* context = Context::cast(elements->get(0));
int context_index = entry->aliased_context_slot();
ASSERT(!context->get(context_index)->IsTheHole());
context->set(context_index, *value);
// For elements that are still writable we keep slow aliasing.
- if (!details.IsReadOnly()) value = handle(element, isolate);
+ if (!details.IsReadOnly()) value = element;
}
dictionary->ValueAtPut(entry, *value);
}
@@ -12465,11 +12690,24 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
}
-Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->TransitionElementsKind(to_kind),
- Object);
+void JSObject::TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->TransitionElementsKind(to_kind));
+}
+
+
+bool AllocationSite::IsNestedSite() {
+ ASSERT(FLAG_trace_track_allocation_sites);
+ Object* current = GetHeap()->allocation_sites_list();
+ while (current != NULL && current->IsAllocationSite()) {
+ AllocationSite* current_site = AllocationSite::cast(current);
+ if (current_site->nested_site() == this) {
+ return true;
+ }
+ current = current_site->weak_next();
+ }
+ return false;
}
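
IsNestedSite scans the heap's weak singly linked list of allocation sites and reports whether any other site points at the receiver through nested_site. The same walk in standalone form, with plain pointers standing in for the tagged weak links:

#include <cassert>

struct AllocationSite {
  AllocationSite* nested_site = nullptr;
  AllocationSite* weak_next = nullptr;  // next entry in the heap's weak list
};

bool IsNestedSite(const AllocationSite* list_head,
                  const AllocationSite* site) {
  for (const AllocationSite* cur = list_head; cur != nullptr;
       cur = cur->weak_next) {
    if (cur->nested_site == site) return true;
  }
  return false;
}

int main() {
  AllocationSite inner, outer;
  outer.nested_site = &inner;  // outer's boilerplate nests inner's
  assert(IsNestedSite(&outer, &inner));
  assert(!IsNestedSite(&outer, &outer));
}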
@@ -12485,23 +12723,26 @@ MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
// Walk through to the Allocation Site
AllocationSite* site = memento->GetAllocationSite();
- if (site->IsLiteralSite()) {
+ if (site->SitePointsToLiteral() &&
+ site->transition_info()->IsJSArray()) {
JSArray* transition_info = JSArray::cast(site->transition_info());
ElementsKind kind = transition_info->GetElementsKind();
// If kind is holey, ensure that to_kind is as well.
if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
- if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
// If the array is huge, it's not likely to be defined in a local
// function, so we shouldn't make new instances of it very often.
uint32_t length = 0;
CHECK(transition_info->length()->ToArrayIndex(&length));
if (length <= AllocationSite::kMaximumArrayBytesToPretransition) {
if (FLAG_trace_track_allocation_sites) {
+ bool is_nested = site->IsNestedSite();
PrintF(
- "AllocationSite: JSArray %p boilerplate updated %s->%s\n",
+ "AllocationSite: JSArray %p boilerplate %s updated %s->%s\n",
reinterpret_cast<void*>(this),
+ is_nested ? "(nested)" : "",
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
@@ -12514,7 +12755,7 @@ MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
- if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
reinterpret_cast<void*>(this),
@@ -12640,7 +12881,7 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
Handle<Object> this_handle(receiver, isolate);
@@ -12904,21 +13145,26 @@ InterceptorInfo* JSObject::GetIndexedInterceptor() {
}
-MaybeObject* JSObject::GetPropertyPostInterceptor(
- Object* receiver,
- Name* name,
+Handle<Object> JSObject::GetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsFound()) {
- return GetProperty(receiver, &result, name, attributes);
+ Isolate* isolate = object->GetIsolate();
+ LookupResult lookup(isolate);
+ object->LocalLookupRealNamedProperty(*name, &lookup);
+ Handle<Object> result;
+ if (lookup.IsFound()) {
+ result = GetProperty(object, receiver, &lookup, name, attributes);
+ } else {
+ // Continue searching via the prototype chain.
+ Handle<Object> prototype(object->GetPrototype(), isolate);
+ *attributes = ABSENT;
+ if (prototype->IsNull()) return isolate->factory()->undefined_value();
+ result = GetPropertyWithReceiver(prototype, receiver, name, attributes);
}
- // Continue searching via the prototype chain.
- Object* pt = GetPrototype();
- *attributes = ABSENT;
- if (pt->IsNull()) return GetHeap()->undefined_value();
- return pt->GetPropertyWithReceiver(receiver, name, attributes);
+ return result;
}
@@ -12936,93 +13182,98 @@ MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
}
-MaybeObject* JSObject::GetPropertyWithInterceptor(
- Object* receiver,
- Name* name,
+Handle<Object> JSObject::GetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes) {
+ Isolate* isolate = object->GetIsolate();
+
// TODO(rossberg): Support symbols in the API.
- if (name->IsSymbol()) return GetHeap()->undefined_value();
+ if (name->IsSymbol()) return isolate->factory()->undefined_value();
- Isolate* isolate = GetIsolate();
- InterceptorInfo* interceptor = GetNamedInterceptor();
- HandleScope scope(isolate);
- Handle<Object> receiver_handle(receiver, isolate);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(String::cast(name));
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor(), isolate);
+ Handle<String> name_string = Handle<String>::cast(name);
if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetterCallback getter =
v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
+ ApiNamedPropertyAccess("interceptor-named-get", *object, *name));
PropertyCallbackArguments
- args(isolate, interceptor->data(), receiver, this);
+ args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Value> result =
- args.Call(getter, v8::Utils::ToLocal(name_handle));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ args.Call(getter, v8::Utils::ToLocal(name_string));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
*attributes = NONE;
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox handle to escape this scope.
+ return handle(*result_internal, isolate);
}
}
- MaybeObject* result = holder_handle->GetPropertyPostInterceptor(
- *receiver_handle,
- *name_handle,
- attributes);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return result;
+ return GetPropertyPostInterceptor(object, receiver, name, attributes);
}
-bool JSObject::HasRealNamedProperty(Isolate* isolate, Name* key) {
+bool JSObject::HasRealNamedProperty(Handle<JSObject> object,
+ Handle<Name> key) {
+ Isolate* isolate = object->GetIsolate();
+ SealHandleScope shs(isolate);
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
return false;
}
}
LookupResult result(isolate);
- LocalLookupRealNamedProperty(key, &result);
+ object->LocalLookupRealNamedProperty(*key, &result);
return result.IsFound() && !result.IsInterceptor();
}
-bool JSObject::HasRealElementProperty(Isolate* isolate, uint32_t index) {
+bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ SealHandleScope shs(isolate);
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
return false;
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
+ if (object->IsJSGlobalProxy()) {
+ HandleScope scope(isolate);
+ Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsNull()) return false;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->HasRealElementProperty(isolate, index);
+ return HasRealElementProperty(Handle<JSObject>::cast(proto), index);
}
- return GetElementAttributeWithoutInterceptor(this, index, false) != ABSENT;
+ return object->GetElementAttributeWithoutInterceptor(
+ *object, index, false) != ABSENT;
}
-bool JSObject::HasRealNamedCallbackProperty(Isolate* isolate, Name* key) {
+bool JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
+ Handle<Name> key) {
+ Isolate* isolate = object->GetIsolate();
+ SealHandleScope shs(isolate);
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
return false;
}
}
LookupResult result(isolate);
- LocalLookupRealNamedProperty(key, &result);
+ object->LocalLookupRealNamedProperty(*key, &result);
return result.IsPropertyCallbacks();
}
@@ -13856,7 +14107,9 @@ void HashTable<Shape, Key>::Rehash(Key key) {
template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
+MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n,
+ Key key,
+ PretenureFlag pretenure) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
int nod = NumberOfDeletedElements();
@@ -13869,14 +14122,14 @@ MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
}
const int kMinCapacityForPretenure = 256;
- bool pretenure =
- (capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this);
+ bool should_pretenure = pretenure == TENURED ||
+ ((capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this));
Object* obj;
{ MaybeObject* maybe_obj =
Allocate(GetHeap(),
nof * 2,
USE_DEFAULT_MINIMUM_CAPACITY,
- pretenure ? TENURED : NOT_TENURED);
+ should_pretenure ? TENURED : NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
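
EnsureCapacity now accepts an explicit PretenureFlag and folds it into the old size-based heuristic: an explicit TENURED request wins, otherwise large tables that already live outside new space get tenured. The decision in isolation, as a sketch with stand-in types:

#include <iostream>

enum PretenureFlag { NOT_TENURED, TENURED };
constexpr int kMinCapacityForPretenure = 256;

bool ShouldPretenure(PretenureFlag requested, int capacity,
                     bool in_new_space) {
  return requested == TENURED ||
         (capacity > kMinCapacityForPretenure && !in_new_space);
}

int main() {
  std::cout << ShouldPretenure(TENURED, 8, true) << "\n";        // 1: explicit
  std::cout << ShouldPretenure(NOT_TENURED, 512, false) << "\n"; // 1: heuristic
  std::cout << ShouldPretenure(NOT_TENURED, 8, true) << "\n";    // 0
}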
@@ -13944,6 +14197,8 @@ template class HashTable<ObjectHashTableShape<1>, Object*>;
template class HashTable<ObjectHashTableShape<2>, Object*>;
+template class HashTable<WeakHashTableShape<2>, Object*>;
+
template class Dictionary<NameDictionaryShape, Name*>;
template class Dictionary<SeededNumberDictionaryShape, uint32_t>;
@@ -14044,6 +14299,14 @@ template
int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
+Handle<Object> JSObject::PrepareSlowElementsForSort(
+ Handle<JSObject> object, uint32_t limit) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->PrepareSlowElementsForSort(limit),
+ Object);
+}
+
+
// Collates undefined and nonexistent elements below limit from position
// zero of the elements. The object stays in Dictionary mode.
MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
@@ -14146,74 +14409,57 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
// the start of the elements array.
// If the object is in dictionary mode, it is converted to fast elements
// mode.
-MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
- Heap* heap = GetHeap();
+Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
+ uint32_t limit) {
+ Isolate* isolate = object->GetIsolate();
- ASSERT(!map()->is_observed());
- if (HasDictionaryElements()) {
+ ASSERT(!object->map()->is_observed());
+ if (object->HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
- SeededNumberDictionary* dict = element_dictionary();
- if (IsJSArray() || dict->requires_slow_elements() ||
+ Handle<SeededNumberDictionary> dict(object->element_dictionary());
+ if (object->IsJSArray() || dict->requires_slow_elements() ||
dict->max_number_key() >= limit) {
- return PrepareSlowElementsForSort(limit);
+ return JSObject::PrepareSlowElementsForSort(object, limit);
}
// Convert to fast elements.
- Object* obj;
- MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
- FAST_HOLEY_ELEMENTS);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- Map* new_map = Map::cast(obj);
+ Handle<Map> new_map =
+ JSObject::GetElementsTransitionMap(object, FAST_HOLEY_ELEMENTS);
- PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED;
- Object* new_array;
- { MaybeObject* maybe_new_array =
- heap->AllocateFixedArray(dict->NumberOfElements(), tenure);
- if (!maybe_new_array->ToObject(&new_array)) return maybe_new_array;
- }
- FixedArray* fast_elements = FixedArray::cast(new_array);
- dict->CopyValuesTo(fast_elements);
- ValidateElements();
+ PretenureFlag tenure = isolate->heap()->InNewSpace(*object) ?
+ NOT_TENURED: TENURED;
+ Handle<FixedArray> fast_elements =
+ isolate->factory()->NewFixedArray(dict->NumberOfElements(), tenure);
+ dict->CopyValuesTo(*fast_elements);
+ object->ValidateElements();
- set_map_and_elements(new_map, fast_elements);
- } else if (HasExternalArrayElements()) {
+ object->set_map_and_elements(*new_map, *fast_elements);
+ } else if (object->HasExternalArrayElements()) {
// External arrays cannot have holes or undefined elements.
- return Smi::FromInt(ExternalArray::cast(elements())->length());
- } else if (!HasFastDoubleElements()) {
- Object* obj;
- { MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ return handle(Smi::FromInt(
+ ExternalArray::cast(object->elements())->length()), isolate);
+ } else if (!object->HasFastDoubleElements()) {
+ JSObject::EnsureWritableFastElements(object);
}
- ASSERT(HasFastSmiOrObjectElements() || HasFastDoubleElements());
+ ASSERT(object->HasFastSmiOrObjectElements() ||
+ object->HasFastDoubleElements());
// Collect holes at the end, undefined before that and the rest at the
// start, and return the number of non-hole, non-undefined values.
- FixedArrayBase* elements_base = FixedArrayBase::cast(this->elements());
+ Handle<FixedArrayBase> elements_base(object->elements());
uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
if (limit > elements_length) {
limit = elements_length;
}
if (limit == 0) {
- return Smi::FromInt(0);
- }
-
- HeapNumber* result_double = NULL;
- if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Pessimistically allocate space for return value before
- // we start mutating the array.
- Object* new_double;
- { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0);
- if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
- }
- result_double = HeapNumber::cast(new_double);
+ return handle(Smi::FromInt(0), isolate);
}
uint32_t result = 0;
- if (elements_base->map() == heap->fixed_double_array_map()) {
- FixedDoubleArray* elements = FixedDoubleArray::cast(elements_base);
+ if (elements_base->map() == isolate->heap()->fixed_double_array_map()) {
+ FixedDoubleArray* elements = FixedDoubleArray::cast(*elements_base);
// Split elements into defined and the_hole, in that order.
unsigned int holes = limit;
// Assume most arrays contain no holes and undefined values, so minimize the
@@ -14240,7 +14486,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
holes++;
}
} else {
- FixedArray* elements = FixedArray::cast(elements_base);
+ FixedArray* elements = FixedArray::cast(*elements_base);
DisallowHeapAllocation no_gc;
// Split elements into defined, undefined and the_hole, in that order. Only
@@ -14285,12 +14531,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
}
}
- if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Smi::FromInt(static_cast<int>(result));
- }
- ASSERT_NE(NULL, result_double);
- result_double->set_value(static_cast<double>(result));
- return result_double;
+ return isolate->factory()->NewNumberFromUint(result);
}
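
Replacing the pessimistic HeapNumber preallocation with factory()->NewNumberFromUint lets the factory decide between a Smi and a heap number once the count is known. A stand-in model of that choice, using std::variant and an illustrative Smi limit (the real limit depends on the target's pointer width):

#include <cstdint>
#include <iostream>
#include <variant>

constexpr uint32_t kSmiMaxValue = (1u << 30) - 1;  // illustrative Smi payload

using Number = std::variant<int32_t, double>;  // Smi vs. HeapNumber

Number NewNumberFromUint(uint32_t value) {
  if (value <= kSmiMaxValue) return static_cast<int32_t>(value);
  return static_cast<double>(value);  // would be a heap allocation in V8
}

int main() {
  std::cout << std::holds_alternative<int32_t>(NewNumberFromUint(42)) << "\n";
  std::cout << std::holds_alternative<double>(NewNumberFromUint(2147483647u))
            << "\n";
}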
@@ -14508,17 +14749,6 @@ PropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
}
-// TODO(mstarzinger): Temporary wrapper until handlified.
-static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
- Handle<Name> name,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(dict->GetIsolate(),
- dict->Add(*name, *value, details),
- NameDictionary);
-}
-
-
Handle<PropertyCell> GlobalObject::EnsurePropertyCell(
Handle<GlobalObject> global,
Handle<Name> name) {
@@ -15597,6 +15827,41 @@ void ObjectHashTable::RemoveEntry(int entry) {
}
+Object* WeakHashTable::Lookup(Object* key) {
+ ASSERT(IsKey(key));
+ int entry = FindEntry(key);
+ if (entry == kNotFound) return GetHeap()->the_hole_value();
+ return get(EntryToValueIndex(entry));
+}
+
+
+MaybeObject* WeakHashTable::Put(Object* key, Object* value) {
+ ASSERT(IsKey(key));
+ int entry = FindEntry(key);
+ // Key is already in table, just overwrite value.
+ if (entry != kNotFound) {
+ set(EntryToValueIndex(entry), value);
+ return this;
+ }
+
+ // Check whether the hash table should be extended.
+ Object* obj;
+ { MaybeObject* maybe_obj = EnsureCapacity(1, key, TENURED);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ WeakHashTable* table = WeakHashTable::cast(obj);
+ table->AddEntry(table->FindInsertionEntry(Hash(key)), key, value);
+ return table;
+}
+
+
+void WeakHashTable::AddEntry(int entry, Object* key, Object* value) {
+ set(EntryToIndex(entry), key);
+ set(EntryToValueIndex(entry), value);
+ ElementAdded();
+}
+
+
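WeakHashTable::Put follows the usual hash-table insert shape: overwrite the value if the key is already present, otherwise ensure capacity (unconditionally TENURED for this table, since it outlives scavenges) and add a fresh entry. A control-flow-only sketch, with std::unordered_map standing in for the open-addressed V8 layout:

#include <cassert>
#include <unordered_map>

struct WeakTable {
  std::unordered_map<int, int> entries;

  void Put(int key, int value) {
    auto it = entries.find(key);
    if (it != entries.end()) {
      it->second = value;  // key already in table: just overwrite the value
      return;
    }
    entries.reserve(entries.size() + 1);  // "EnsureCapacity(1, key, TENURED)"
    entries.emplace(key, value);          // AddEntry at the insertion slot
  }
};

int main() {
  WeakTable t;
  t.Put(1, 10);
  t.Put(1, 20);  // overwrite path, no growth
  assert(t.entries.at(1) == 20 && t.entries.size() == 1);
}
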
DeclaredAccessorDescriptorIterator::DeclaredAccessorDescriptorIterator(
DeclaredAccessorDescriptor* descriptor)
: array_(descriptor->serialized_data()->GetDataStartAddress()),
@@ -16072,8 +16337,8 @@ void PropertyCell::set_type(Type* type, WriteBarrierMode ignored) {
}
-Type* PropertyCell::UpdateType(Handle<PropertyCell> cell,
- Handle<Object> value) {
+Handle<Type> PropertyCell::UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value) {
Isolate* isolate = cell->GetIsolate();
Handle<Type> old_type(cell->type(), isolate);
// TODO(2803): Do not track ConsString as constant because they cannot be
@@ -16083,34 +16348,27 @@ Type* PropertyCell::UpdateType(Handle<PropertyCell> cell,
: Type::Constant(value, isolate), isolate);
if (new_type->Is(old_type)) {
- return *old_type;
+ return old_type;
}
cell->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
if (old_type->Is(Type::None()) || old_type->Is(Type::Undefined())) {
- return *new_type;
+ return new_type;
}
- return Type::Any();
+ return handle(Type::Any(), isolate);
}
-MaybeObject* PropertyCell::SetValueInferType(Object* value,
- WriteBarrierMode ignored) {
- set_value(value, ignored);
- if (!Type::Any()->Is(type())) {
- IdempotentPointerToHandleCodeTrampoline trampoline(GetIsolate());
- MaybeObject* maybe_type = trampoline.CallWithReturnValue(
- &PropertyCell::UpdateType,
- Handle<PropertyCell>(this),
- Handle<Object>(value, GetIsolate()));
- Type* new_type = NULL;
- if (!maybe_type->To(&new_type)) return maybe_type;
- set_type(new_type);
+void PropertyCell::SetValueInferType(Handle<PropertyCell> cell,
+ Handle<Object> value) {
+ cell->set_value(*value);
+ if (!Type::Any()->Is(cell->type())) {
+ Handle<Type> new_type = UpdatedType(cell, value);
+ cell->set_type(*new_type);
}
- return value;
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index d3593b6edc..e8c9850484 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -333,7 +333,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// NOTE: Everything following JS_VALUE_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST_ALL(V) \
+#define INSTANCE_TYPE_LIST(V) \
V(STRING_TYPE) \
V(ASCII_STRING_TYPE) \
V(CONS_STRING_TYPE) \
@@ -405,6 +405,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
\
V(FIXED_ARRAY_TYPE) \
V(FIXED_DOUBLE_ARRAY_TYPE) \
+ V(CONSTANT_POOL_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
\
V(JS_MESSAGE_OBJECT_TYPE) \
@@ -431,18 +432,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
\
V(JS_FUNCTION_TYPE) \
V(JS_FUNCTION_PROXY_TYPE) \
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
V(DEBUG_INFO_TYPE) \
V(BREAK_POINT_INFO_TYPE)
-#else
-#define INSTANCE_TYPE_LIST_DEBUGGER(V)
-#endif
-
-#define INSTANCE_TYPE_LIST(V) \
- INSTANCE_TYPE_LIST_ALL(V) \
- INSTANCE_TYPE_LIST_DEBUGGER(V)
// Since string types are not consecutive, this macro is used to
@@ -725,6 +716,7 @@ enum InstanceType {
EXTERNAL_DOUBLE_ARRAY_TYPE,
EXTERNAL_PIXEL_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
FIXED_DOUBLE_ARRAY_TYPE,
+ CONSTANT_POOL_ARRAY_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
@@ -873,8 +865,9 @@ enum CompareResult {
inline void set_##name(type* value, \
WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
-
class AccessorPair;
+class AllocationSite;
+class AllocationSiteContext;
class DictionaryElementsAccessor;
class ElementsAccessor;
class Failure;
@@ -1010,6 +1003,7 @@ class MaybeObject BASE_EMBEDDED {
V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
+ V(ConstantPoolArray) \
V(Context) \
V(NativeContext) \
V(ScopeInfo) \
@@ -1054,7 +1048,8 @@ class MaybeObject BASE_EMBEDDED {
V(AccessCheckNeeded) \
V(Cell) \
V(PropertyCell) \
- V(ObjectHashTable)
+ V(ObjectHashTable) \
+ V(WeakHashTable)
#define ERROR_MESSAGES_LIST(V) \
@@ -1206,6 +1201,7 @@ class MaybeObject BASE_EMBEDDED {
V(kModuleStatement, "Module statement") \
V(kModuleVariable, "Module variable") \
V(kModuleUrl, "Module url") \
+ V(kNativeFunctionLiteral, "Native function literal") \
V(kNoCasesLeft, "no cases left") \
V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin, \
"No empty arrays here in EmitFastAsciiArrayJoin") \
@@ -1249,7 +1245,6 @@ class MaybeObject BASE_EMBEDDED {
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "register was clobbered") \
V(kScopedBlock, "ScopedBlock") \
- V(kSharedFunctionInfoLiteral, "Shared function info literal") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
V(kStackFrameTypesMustMatch, "stack frame types must match") \
@@ -1440,8 +1435,7 @@ class Object : public MaybeObject {
}
inline MaybeObject* AllocateNewStorageFor(Heap* heap,
- Representation representation,
- PretenureFlag tenure = NOT_TENURED);
+ Representation representation);
// Returns true if the object is of the correct type to be used as a
// implementation of a JSObject's elements.
@@ -1467,6 +1461,12 @@ class Object : public MaybeObject {
MUST_USE_RESULT inline MaybeObject* GetProperty(
Name* key,
PropertyAttributes* attributes);
+
+ // TODO(yangguo): this should eventually replace the non-handlified version.
+ static Handle<Object> GetPropertyWithReceiver(Handle<Object> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetPropertyWithReceiver(
Object* receiver,
Name* key,
@@ -1950,42 +1950,27 @@ class JSReceiver: public HeapObject {
// Casting.
static inline JSReceiver* cast(Object* obj);
+ // Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
static Handle<Object> SetProperty(Handle<JSReceiver> object,
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode =
+ MAY_BE_STORE_FROM_KEYED);
static Handle<Object> SetElement(Handle<JSReceiver> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* SetPropertyOrFail(
- Handle<JSReceiver> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
-
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetProperty(
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT MaybeObject* SetProperty(
- LookupResult* result,
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value);
+ // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
+ static inline bool HasProperty(Handle<JSReceiver> object, Handle<Name> name);
+ static inline bool HasLocalProperty(Handle<JSReceiver>, Handle<Name> name);
+ static inline bool HasElement(Handle<JSReceiver> object, uint32_t index);
+ static inline bool HasLocalElement(Handle<JSReceiver> object, uint32_t index);
+ // Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7.
static Handle<Object> DeleteProperty(Handle<JSReceiver> object,
Handle<Name> name,
DeleteMode mode = NORMAL_DELETION);
@@ -2011,12 +1996,6 @@ class JSReceiver: public HeapObject {
inline PropertyAttributes GetElementAttribute(uint32_t index);
inline PropertyAttributes GetLocalElementAttribute(uint32_t index);
- // Can cause a GC.
- inline bool HasProperty(Name* name);
- inline bool HasLocalProperty(Name* name);
- inline bool HasElement(uint32_t index);
- inline bool HasLocalElement(uint32_t index);
-
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
@@ -2036,12 +2015,24 @@ class JSReceiver: public HeapObject {
protected:
Smi* GenerateIdentityHash();
+ static Handle<Object> SetPropertyWithDefinedSetter(Handle<JSReceiver> object,
+ Handle<JSReceiver> setter,
+ Handle<Object> value);
+
private:
PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver,
LookupResult* result,
Name* name,
bool continue_search);
+ static Handle<Object> SetProperty(Handle<JSReceiver> receiver,
+ LookupResult* result,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_from_keyed);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
};
@@ -2121,50 +2112,49 @@ class JSObject: public JSReceiver {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Requires: HasFastElements().
+ static Handle<FixedArray> EnsureWritableFastElements(
+ Handle<JSObject> object);
MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
// Collects elements starting at index 0.
// Undefined values are placed after non-undefined values.
// Returns the number of non-undefined values.
- MUST_USE_RESULT MaybeObject* PrepareElementsForSort(uint32_t limit);
+ static Handle<Object> PrepareElementsForSort(Handle<JSObject> object,
+ uint32_t limit);
// As PrepareElementsForSort, but only on objects where elements is
// a dictionary, and it will stay a dictionary.
+ static Handle<Object> PrepareSlowElementsForSort(Handle<JSObject> object,
+ uint32_t limit);
MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
- MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
- Object* structure,
- Name* name);
+ static Handle<Object> GetPropertyWithCallback(Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Object> structure,
+ Handle<Name> name);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck(
- LookupResult* result,
- Name* name,
- Object* value,
- bool check_prototype,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(
- Object* structure,
- Name* name,
- Object* value,
- JSObject* holder,
+ static Handle<Object> SetPropertyWithCallback(
+ Handle<JSObject> object,
+ Handle<Object> structure,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<JSObject> holder,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
- Name* name,
- Object* value,
+
+ static Handle<Object> SetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor(
- Name* name,
- Object* value,
+
+ static Handle<Object> SetPropertyForResult(
+ Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- StoreMode mode = ALLOW_AS_CONSTANT);
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
static Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
@@ -2183,18 +2173,17 @@ class JSObject: public JSReceiver {
static inline Handle<Map> FindTransitionToField(Handle<Map> map,
Handle<Name> key);
- inline int LastAddedFieldIndex();
-
// Extend the receiver with a single fast property that appears first in
// the passed map. This also extends the property backing store if necessary.
static void AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map);
- inline MUST_USE_RESULT MaybeObject* AllocateStorageForMap(Map* map);
+ // Migrates the given object to a map whose field representations are the
+ // least upper bound of all known representations for that field.
static void MigrateInstance(Handle<JSObject> instance);
- inline MUST_USE_RESULT MaybeObject* MigrateInstance();
+ // Migrates the given object only if the target map is already available,
+ // or returns an empty handle if such a map is not yet available.
static Handle<Object> TryMigrateInstance(Handle<JSObject> instance);
- inline MUST_USE_RESULT MaybeObject* TryMigrateInstance();
// Can cause GC.
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributesTrampoline(
@@ -2209,27 +2198,18 @@ class JSObject: public JSReceiver {
// Handles the special representation of JS global objects.
Object* GetNormalizedProperty(LookupResult* result);
- // Sets the property value in a normalized object given (key, value).
- // Handles the special representation of JS global objects.
- static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
- Handle<Object> value);
-
// Sets the property value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
- MUST_USE_RESULT MaybeObject* SetNormalizedProperty(LookupResult* result,
- Object* value);
+ static void SetNormalizedProperty(Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Object> value);
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
- static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyDetails details);
-
- MUST_USE_RESULT MaybeObject* SetNormalizedProperty(Name* name,
- Object* value,
- PropertyDetails details);
+ static void SetNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyDetails details);
static void OptimizeAsPrototype(Handle<JSObject> object);
@@ -2253,6 +2233,15 @@ class JSObject: public JSReceiver {
uint32_t index,
bool continue_search);
+ // Retrieves an AccessorPair property from the given object. Might return
+ // undefined if the property doesn't exist or is of a different kind.
+ static Handle<Object> GetAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component);
+
+ // Defines an AccessorPair property on the given object.
+ // TODO(mstarzinger): Rename to SetAccessor() and return empty handle on
+ // exception instead of letting callers check for scheduled exception.
static void DefineAccessor(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> getter,
@@ -2260,24 +2249,19 @@ class JSObject: public JSReceiver {
PropertyAttributes attributes,
v8::AccessControl access_control = v8::DEFAULT);
- MaybeObject* LookupAccessor(Name* name, AccessorComponent component);
-
+ // Defines an AccessorInfo property on the given object.
static Handle<Object> SetAccessor(Handle<JSObject> object,
Handle<AccessorInfo> info);
- // Used from Object::GetProperty().
- MUST_USE_RESULT MaybeObject* GetPropertyWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- Name* name,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyWithInterceptor(
- Object* receiver,
- Name* name,
+ static Handle<Object> GetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyPostInterceptor(
- Object* receiver,
- Name* name,
+ static Handle<Object> GetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor(
Object* receiver,
@@ -2361,9 +2345,6 @@ class JSObject: public JSReceiver {
return old_capacity + (old_capacity >> 1) + 16;
}
- PropertyType GetLocalPropertyType(Name* name);
- PropertyType GetLocalElementType(uint32_t index);
-
// These methods do not perform access checks!
AccessorPair* GetLocalPropertyAccessorPair(Name* name);
AccessorPair* GetLocalElementAccessorPair(uint32_t index);
@@ -2438,9 +2419,11 @@ class JSObject: public JSReceiver {
inline bool HasIndexedInterceptor();
// Support functions for v8 api (needed for correct interceptor behavior).
- bool HasRealNamedProperty(Isolate* isolate, Name* key);
- bool HasRealElementProperty(Isolate* isolate, uint32_t index);
- bool HasRealNamedCallbackProperty(Isolate* isolate, Name* key);
+ static bool HasRealNamedProperty(Handle<JSObject> object,
+ Handle<Name> key);
+ static bool HasRealElementProperty(Handle<JSObject> object, uint32_t index);
+ static bool HasRealNamedCallbackProperty(Handle<JSObject> object,
+ Handle<Name> key);
// Get the header size for a JSObject. Used to compute the index of
// internal fields as well as the number of internal fields.
@@ -2456,8 +2439,6 @@ class JSObject: public JSReceiver {
void LocalLookupRealNamedProperty(Name* name, LookupResult* result);
void LookupRealNamedProperty(Name* name, LookupResult* result);
void LookupRealNamedPropertyInPrototypes(Name* name, LookupResult* result);
- MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
- uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode);
void LookupCallbackProperty(Name* name, LookupResult* result);
// Returns the number of properties on this object filtering out properties
@@ -2483,32 +2464,6 @@ class JSObject: public JSReceiver {
// Returns the number of enumerable elements.
int GetEnumElementKeys(FixedArray* storage);
- // Add a property to a fast-case object using a map transition to
- // new_map.
- MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(
- Map* new_map,
- Name* name,
- Object* value,
- int field_index,
- Representation representation);
-
- // Add a constant function property to a fast-case object.
- // This leaves a CONSTANT_TRANSITION in the old map, and
- // if it is called on a second object with this map, a
- // normal property is added instead, with a map transition.
- // This avoids the creation of many maps with the same constant
- // function, all orphaned.
- MUST_USE_RESULT MaybeObject* AddConstantProperty(
- Name* name,
- Object* constant,
- PropertyAttributes attributes,
- TransitionFlag flag);
-
- MUST_USE_RESULT MaybeObject* ReplaceSlowProperty(
- Name* name,
- Object* value,
- PropertyAttributes attributes);
-
// Returns a new map with all transitions dropped from the object's current
// map and the ElementsKind set.
static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
@@ -2519,43 +2474,18 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* GetElementsTransitionMapSlow(
ElementsKind elements_kind);
- static Handle<Object> TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind);
+ static void TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* MigrateToMap(Map* new_map);
- MUST_USE_RESULT MaybeObject* GeneralizeFieldRepresentation(
- int modify_index,
- Representation new_representation,
- StoreMode store_mode);
-
- // Add a property to a fast-case object.
- MUST_USE_RESULT MaybeObject* AddFastProperty(
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ValueType value_type = OPTIMAL_REPRESENTATION,
- TransitionFlag flag = INSERT_TRANSITION);
-
- // Add a property to a slow-case object.
- MUST_USE_RESULT MaybeObject* AddSlowProperty(Name* name,
- Object* value,
- PropertyAttributes attributes);
-
- // Add a property to an object. May cause GC.
- MUST_USE_RESULT MaybeObject* AddProperty(
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
- ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT,
- TransitionFlag flag = INSERT_TRANSITION);
+ // TODO(mstarzinger): Both public because of ConvertAndSetLocalProperty().
+ static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map);
+ static void GeneralizeFieldRepresentation(Handle<JSObject> object,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -2565,10 +2495,6 @@ class JSObject: public JSReceiver {
PropertyNormalizationMode mode,
int expected_additional_properties);
- MUST_USE_RESULT MaybeObject* NormalizeProperties(
- PropertyNormalizationMode mode,
- int expected_additional_properties);
-
// Convert and update the elements backing store to be a
// SeededNumberDictionary dictionary. Returns the backing after conversion.
static Handle<SeededNumberDictionary> NormalizeElements(
@@ -2577,13 +2503,9 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* NormalizeElements();
// Transform slow named properties to fast variants.
- // Returns failure if allocation failed.
static void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
- MUST_USE_RESULT MaybeObject* TransformToFastProperties(
- int unused_property_fields);
-
// Access fast-case object properties at index.
MUST_USE_RESULT inline MaybeObject* FastPropertyAt(
Representation representation,
@@ -2616,22 +2538,26 @@ class JSObject: public JSReceiver {
// Check whether this object references another object
bool ReferencesObject(Object* obj);
- // Casting.
- static inline JSObject* cast(Object* obj);
-
// Disallow further properties to be added to the object.
static Handle<Object> PreventExtensions(Handle<JSObject> object);
- MUST_USE_RESULT MaybeObject* PreventExtensions();
// ES5 Object.freeze
- MUST_USE_RESULT MaybeObject* Freeze(Isolate* isolate);
-
+ static Handle<Object> Freeze(Handle<JSObject> object);
// Called the first time an object is observed with ES7 Object.observe.
- MUST_USE_RESULT MaybeObject* SetObserved(Isolate* isolate);
+ static void SetObserved(Handle<JSObject> object);
- // Copy object
- MUST_USE_RESULT MaybeObject* DeepCopy(Isolate* isolate);
+ // Copy object.
+ static Handle<JSObject> Copy(Handle<JSObject> object,
+ Handle<AllocationSite> site);
+ static Handle<JSObject> Copy(Handle<JSObject> object);
+ static Handle<JSObject> DeepCopy(Handle<JSObject> object,
+ AllocationSiteContext* site_context);
+ static Handle<JSObject> DeepWalk(Handle<JSObject> object,
+ AllocationSiteContext* site_context);
+
+ // Casting.
+ static inline JSObject* cast(Object* obj);
// Dispatched behavior.
void JSObjectShortPrint(StringStream* accumulator);
@@ -2670,6 +2596,14 @@ class JSObject: public JSReceiver {
void IncrementSpillStatistics(SpillInformation* info);
#endif
+
+#ifdef VERIFY_HEAP
+ // If a GC was caused while constructing this object, the elements pointer
+ // may point to a one pointer filler map. The object won't be rooted, but
+ // our heap verification code could stumble across it.
+ bool ElementsAreSafeToExamine();
+#endif
+
Object* SlowReverseLookup(Object* value);
// Maximal number of fast properties for the JSObject. Used to
@@ -2733,15 +2667,15 @@ class JSObject: public JSReceiver {
private:
friend class DictionaryElementsAccessor;
friend class JSReceiver;
+ friend class Object;
- // TODO(mstarzinger): Soon to be handlified.
- MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
+ // Used from Object::GetProperty().
+ static Handle<Object> GetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ LookupResult* result,
+ Handle<Name> name,
+ PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver,
Object* structure,
@@ -2755,11 +2689,12 @@ class JSObject: public JSReceiver {
JSReceiver* receiver,
uint32_t index,
bool continue_search);
- MUST_USE_RESULT MaybeObject* SetElementWithCallback(
- Object* structure,
+ static Handle<Object> SetElementWithCallback(
+ Handle<JSObject> object,
+ Handle<Object> structure,
uint32_t index,
- Object* value,
- JSObject* holder,
+ Handle<Object> value,
+ Handle<JSObject> holder,
StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
uint32_t index,
@@ -2775,17 +2710,91 @@ class JSObject: public JSReceiver {
StrictModeFlag strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
+ MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
+ uint32_t index,
+ Object* value,
+ bool* found,
+ StrictModeFlag strict_mode);
// Searches the prototype chain for property 'name'. If it is found and
// has a setter, invoke it and set '*done' to true. If it is found and is
// read-only, reject and set '*done' to true. Otherwise, set '*done' to
- // false. Can cause GC and can return a failure result with '*done==true'.
- MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypes(
- Name* name,
- Object* value,
+ // false. Can throw and return an empty handle with '*done==true'.
+ static Handle<Object> SetPropertyViaPrototypes(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done);
+ static Handle<Object> SetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+ static Handle<Object> SetPropertyUsingTransition(
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+ static Handle<Object> SetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Name> name,
+ Handle<Object> value,
+ bool check_prototype,
+ StrictModeFlag strict_mode);
+
+ // Add a property to an object.
+ static Handle<Object> AddProperty(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ TransitionFlag flag = INSERT_TRANSITION);
+
+ // Add a constant function property to a fast-case object.
+ // This leaves a CONSTANT_TRANSITION in the old map, and
+ // if it is called on a second object with this map, a
+ // normal property is added instead, with a map transition.
+ // This avoids the creation of many maps with the same constant
+ // function, all orphaned.
+ static void AddConstantProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> constant,
+ PropertyAttributes attributes,
+ TransitionFlag flag);
+
+ // Add a property to a fast-case object.
+ static void AddFastProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode,
+ ValueType value_type,
+ TransitionFlag flag);
+
+ // Add a property to a fast-case object using a map transition to
+ // new_map.
+ static void AddFastPropertyUsingMap(Handle<JSObject> object,
+ Handle<Map> new_map,
+ Handle<Name> name,
+ Handle<Object> value,
+ int field_index,
+ Representation representation);
+
+ // Add a property to a slow-case object.
+ static void AddSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes);
static Handle<Object> DeleteProperty(Handle<JSObject> object,
Handle<Name> name,
@@ -2919,7 +2928,8 @@ class FixedArray: public FixedArrayBase {
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
- MUST_USE_RESULT MaybeObject* CopySize(int new_length);
+ MUST_USE_RESULT MaybeObject* CopySize(int new_length,
+ PretenureFlag pretenure = NOT_TENURED);
// Add the elements of a JSArray to this FixedArray.
MUST_USE_RESULT MaybeObject* AddKeysFromJSArray(JSArray* array);
@@ -3042,6 +3052,100 @@ class FixedDoubleArray: public FixedArrayBase {
};
+// ConstantPoolArray describes a fixed-sized array containing constant pool
+// entries.
+// The format of the pool is:
+// [0]: Field holding the first index which is a pointer entry
+// [1]: Field holding the first index which is an int32 entry
+// [2] ... [first_ptr_index() - 1]: 64-bit entries
+// [first_ptr_index()] ... [first_int32_index() - 1]: pointer entries
+// [first_int32_index()] ... [length - 1]: 32-bit entries
+class ConstantPoolArray: public FixedArrayBase {
+ public:
+ // Getters for the field storing the first index for different type entries.
+ inline int first_ptr_index();
+ inline int first_int64_index();
+ inline int first_int32_index();
+
+ // Getters for counts of different type entries.
+ inline int count_of_ptr_entries();
+ inline int count_of_int64_entries();
+ inline int count_of_int32_entries();
+
+ // Setter and getter for pool elements.
+ inline Object* get_ptr_entry(int index);
+ inline int64_t get_int64_entry(int index);
+ inline int32_t get_int32_entry(int index);
+ inline double get_int64_entry_as_double(int index);
+
+ inline void set(int index, Object* value);
+ inline void set(int index, int64_t value);
+ inline void set(int index, double value);
+ inline void set(int index, int32_t value);
+
+ // Set up initial state.
+ inline void SetEntryCounts(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries);
+
+ // Copy operations
+ MUST_USE_RESULT inline MaybeObject* Copy();
+
+ // Garbage collection support.
+ inline static int SizeFor(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ return RoundUp(OffsetAt(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries),
+ kPointerSize);
+ }
+
+ // Code Generation support.
+ inline int OffsetOfElementAt(int index) {
+ ASSERT(index < length());
+ if (index >= first_int32_index()) {
+ return OffsetAt(count_of_int64_entries(), count_of_ptr_entries(),
+ index - first_int32_index());
+ } else if (index >= first_ptr_index()) {
+ return OffsetAt(count_of_int64_entries(), index - first_ptr_index(), 0);
+ } else {
+ return OffsetAt(index, 0, 0);
+ }
+ }
+
+ // Casting.
+ static inline ConstantPoolArray* cast(Object* obj);
+
+ // Layout description.
+ static const int kFirstPointerIndexOffset = FixedArray::kHeaderSize;
+ static const int kFirstInt32IndexOffset =
+ kFirstPointerIndexOffset + kPointerSize;
+ static const int kFirstOffset = kFirstInt32IndexOffset + kPointerSize;
+
+ // Dispatched behavior.
+ void ConstantPoolIterateBody(ObjectVisitor* v);
+
+ DECLARE_PRINTER(ConstantPoolArray)
+ DECLARE_VERIFIER(ConstantPoolArray)
+
+ private:
+ inline void set_first_ptr_index(int value);
+ inline void set_first_int32_index(int value);
+
+ inline static int OffsetAt(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ return kFirstOffset
+ + (number_of_int64_entries * kInt64Size)
+ + (number_of_ptr_entries * kPointerSize)
+ + (number_of_int32_entries * kInt32Size);
+ }
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolArray);
+};
+
+
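A self-contained sketch of the offset arithmetic behind SizeFor() and OffsetAt() above, with the size constants collapsed to plain numbers for an assumed 64-bit layout (the real values come from the shared globals header):

    #include <cstdio>

    // Assumed 64-bit layout: map + length header (16 bytes) plus the two
    // first-index fields gives kFirstOffset; entry sizes as in globals.h.
    const int kFirstOffset = 32;
    const int kInt64Size = 8;
    const int kPointerSize = 8;
    const int kInt32Size = 4;

    // Entries live in three contiguous sections: 64-bit values first,
    // then tagged pointers, then 32-bit values.
    int OffsetAt(int int64_entries, int ptr_entries, int int32_entries) {
      return kFirstOffset
          + int64_entries * kInt64Size
          + ptr_entries * kPointerSize
          + int32_entries * kInt32Size;
    }

    int main() {
      // Byte offset of the first int32 entry in a pool holding two int64
      // entries and three pointer entries: 32 + 16 + 24 = 72.
      printf("%d\n", OffsetAt(2, 3, 0));
      return 0;
    }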
// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of these objects is:
// [0]: Number of descriptors
@@ -3175,6 +3279,13 @@ class DescriptorArray: public FixedArray {
DescriptorArray* src,
int src_index,
const WhitenessWitness&);
+ static Handle<DescriptorArray> Merge(Handle<DescriptorArray> desc,
+ int verbatim,
+ int valid,
+ int new_size,
+ int modify_index,
+ StoreMode store_mode,
+ Handle<DescriptorArray> other);
MUST_USE_RESULT MaybeObject* Merge(int verbatim,
int valid,
int new_size,
@@ -3191,6 +3302,10 @@ class DescriptorArray: public FixedArray {
return CopyUpToAddAttributes(enumeration_index, NONE);
}
+ static Handle<DescriptorArray> CopyUpToAddAttributes(
+ Handle<DescriptorArray> desc,
+ int enumeration_index,
+ PropertyAttributes attributes);
MUST_USE_RESULT MaybeObject* CopyUpToAddAttributes(
int enumeration_index,
PropertyAttributes attributes);
@@ -3543,7 +3658,10 @@ class HashTable: public FixedArray {
MUST_USE_RESULT MaybeObject* Shrink(Key key);
// Ensure enough space for n additional elements.
- MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
+ MUST_USE_RESULT MaybeObject* EnsureCapacity(
+ int n,
+ Key key,
+ PretenureFlag pretenure = NOT_TENURED);
};
@@ -3982,6 +4100,58 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
};
+template <int entrysize>
+class WeakHashTableShape : public BaseShape<Object*> {
+ public:
+ static inline bool IsMatch(Object* key, Object* other);
+ static inline uint32_t Hash(Object* key);
+ static inline uint32_t HashForObject(Object* key, Object* object);
+ MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
+ Object* key);
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = entrysize;
+};
+
+
+// WeakHashTable maps keys that are arbitrary objects to object values.
+// It is used for the global weak hash table that maps objects
+// embedded in optimized code to dependent code lists.
+class WeakHashTable: public HashTable<WeakHashTableShape<2>, Object*> {
+ public:
+ static inline WeakHashTable* cast(Object* obj) {
+ ASSERT(obj->IsHashTable());
+ return reinterpret_cast<WeakHashTable*>(obj);
+ }
+
+ // Looks up the value associated with the given key. The hole value is
+ // returned in case the key is not present.
+ Object* Lookup(Object* key);
+
+ // Adds (or overwrites) the value associated with the given key. Mapping a
+ // key to the hole value causes removal of the whole entry.
+ MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
+
+ // This function is called when heap verification is turned on.
+ void Zap(Object* value) {
+ int capacity = Capacity();
+ for (int i = 0; i < capacity; i++) {
+ set(EntryToIndex(i), value);
+ set(EntryToValueIndex(i), value);
+ }
+ }
+
+ private:
+ friend class MarkCompactCollector;
+
+ void AddEntry(int entry, Object* key, Object* value);
+
+ // Returns the index to the value of an entry.
+ static inline int EntryToValueIndex(int entry) {
+ return EntryToIndex(entry) + 1;
+ }
+};
+
+
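A loose model of the Put() semantics and the flat entry/value layout described above, assuming the table is a bare [key, value, key, value, ...] array scanned linearly (the real table hashes keys and keeps a header before the entries):

    const int kCapacity = 4;
    const int kFree = 0;   // stands in for an unused slot
    const int kHole = -1;  // stands in for the hole value
    int table[kCapacity * 2] = {kFree};

    int EntryToIndex(int entry)      { return entry * 2; }
    int EntryToValueIndex(int entry) { return EntryToIndex(entry) + 1; }

    // Adds (or overwrites) the value for |key|; mapping a key to the
    // hole removes the whole entry, as in WeakHashTable::Put above.
    void Put(int key, int value) {
      for (int i = 0; i < kCapacity; i++) {
        int k = table[EntryToIndex(i)];
        if (k != key && k != kFree) continue;
        bool remove = (value == kHole);
        table[EntryToIndex(i)] = remove ? kFree : key;
        table[EntryToValueIndex(i)] = remove ? kFree : value;
        return;
      }
    }

    // Returns the hole value when the key is not present.
    int Lookup(int key) {
      for (int i = 0; i < kCapacity; i++) {
        if (table[EntryToIndex(i)] == key) return table[EntryToValueIndex(i)];
      }
      return kHole;
    }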
// JSFunctionResultCache caches results of some JSFunction invocation.
// It is a fixed array with fixed structure:
// [0]: factory function
@@ -4120,9 +4290,9 @@ class ScopeInfo : public FixedArray {
// Copies all the context locals into an object used to materialize a scope.
- bool CopyContextLocalsToScopeObject(Isolate* isolate,
- Handle<Context> context,
- Handle<JSObject> scope_object);
+ static bool CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object);
static Handle<ScopeInfo> Create(Scope* scope, Zone* zone);
@@ -4233,8 +4403,9 @@ class NormalizedMapCache: public FixedArray {
public:
static const int kEntries = 64;
- MUST_USE_RESULT MaybeObject* Get(JSObject* object,
- PropertyNormalizationMode mode);
+ static Handle<Map> Get(Handle<NormalizedMapCache> cache,
+ Handle<JSObject> object,
+ PropertyNormalizationMode mode);
void Clear();
@@ -4772,6 +4943,7 @@ class Code: public HeapObject {
V(FUNCTION) \
V(OPTIMIZED_FUNCTION) \
V(STUB) \
+ V(HANDLER) \
V(BUILTIN) \
V(REGEXP)
@@ -4811,19 +4983,16 @@ class Code: public HeapObject {
CONSTANT,
CALLBACKS,
INTERCEPTOR,
- MAP_TRANSITION,
+ TRANSITION,
NONEXISTENT
};
- enum StubHolder {
- OWN_STUB,
- PROTOTYPE_STUB
- };
-
typedef int ExtraICState;
static const ExtraICState kNoExtraICState = 0;
+ static const int kPrologueOffsetNotSet = -1;
+
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* ICState2String(InlineCacheState state);
@@ -4886,6 +5055,9 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
+ inline Kind handler_kind() {
+ return static_cast<Kind>(arguments_count());
+ }
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
@@ -4895,7 +5067,8 @@ class Code: public HeapObject {
// TODO(danno): This is a bit of a hack right now since there are still
// clients of this API that pass "extra" values in for argc. These clients
// should be retrofitted to use ExtendedExtraICState.
- return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC;
+ return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
+ kind == BINARY_OP_IC;
}
inline StubType type(); // Only valid for monomorphic IC stubs.
@@ -4904,6 +5077,7 @@ class Code: public HeapObject {
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
inline bool is_debug_stub();
+ inline bool is_handler() { return kind() == HANDLER; }
inline bool is_load_stub() { return kind() == LOAD_IC; }
inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
inline bool is_store_stub() { return kind() == STORE_IC; }
@@ -4914,6 +5088,7 @@ class Code: public HeapObject {
inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
+ inline bool is_keyed_stub();
// [major_key]: For kind STUB or BINARY_OP_IC, the major key.
inline int major_key();
@@ -4997,8 +5172,6 @@ class Code: public HeapObject {
inline bool marked_for_deoptimization();
inline void set_marked_for_deoptimization(bool flag);
- bool allowed_in_shared_map_code_cache();
-
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -5011,9 +5184,12 @@ class Code: public HeapObject {
void FindAllMaps(MapHandleList* maps);
void ReplaceFirstMap(Map* replace);
- // Find the first code in an IC stub.
- Code* FindFirstCode();
- void FindAllCode(CodeHandleList* code_list, int length);
+ // Find the first handler in an IC stub.
+ Code* FindFirstHandler();
+
+ // Find |length| handlers and put them into |code_list|. Returns false if not
+ // enough handlers can be found.
+ bool FindHandlers(CodeHandleList* code_list, int length = -1);
// Find the first name in an IC stub.
Name* FindFirstName();
@@ -5024,8 +5200,6 @@ class Code: public HeapObject {
class ExtraICStateKeyedAccessStoreMode:
public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT
- class ExtraICStateStubHolder: public BitField<StubHolder, 0, 1> {};
-
static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) {
return ExtraICStateStrictMode::decode(extra_ic_state);
}
@@ -5042,10 +5216,6 @@ class Code: public HeapObject {
ExtraICStateStrictMode::encode(strict_mode);
}
- static inline ExtraICState ComputeExtraICState(StubHolder stub_holder) {
- return ExtraICStateStubHolder::encode(stub_holder);
- }
-
// Flags operations.
static inline Flags ComputeFlags(
Kind kind,
@@ -5142,11 +5312,15 @@ class Code: public HeapObject {
#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
enum Age {
- kNoAge = 0,
+ kNotExecutedCodeAge = -2,
+ kExecutedOnceCodeAge = -1,
+ kNoAgeCodeAge = 0,
CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM)
kAfterLastCodeAge,
kLastCodeAge = kAfterLastCodeAge - 1,
- kCodeAgeCount = kAfterLastCodeAge - 1
+ kCodeAgeCount = kAfterLastCodeAge - 1,
+ kIsOldCodeAge = kSexagenarianCodeAge,
+ kPreAgedCodeAge = kIsOldCodeAge - 1
};
#undef DECLARE_CODE_AGE_ENUM
@@ -5154,19 +5328,25 @@ class Code: public HeapObject {
// being entered through the prologue. Used to determine when it is
// relatively safe to flush this code object and replace it with the lazy
// compilation stub.
- static void MakeCodeAgeSequenceYoung(byte* sequence);
+ static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
+ static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
void MakeOlder(MarkingParity);
static bool IsYoungSequence(byte* sequence);
bool IsOld();
- int GetAge();
+ Age GetAge();
+ static inline Code* GetPreAgedCodeAgeStub(Isolate* isolate) {
+ return GetCodeAgeStub(isolate, kNotExecutedCodeAge, NO_MARKING_PARITY);
+ }
void PrintDeoptLocation(int bailout_id);
bool CanDeoptAt(Address pc);
#ifdef VERIFY_HEAP
- void VerifyEmbeddedMapsDependency();
+ void VerifyEmbeddedObjectsDependency();
#endif
+ static bool IsWeakEmbeddedObject(Kind kind, Object* object);
+
// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
@@ -5300,10 +5480,11 @@ class Code: public HeapObject {
MarkingParity* parity);
static void GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity);
- static Code* GetCodeAgeStub(Age age, MarkingParity parity);
+ static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity);
// Code aging -- platform-specific
- static void PatchPlatformCodeAge(byte* sequence, Age age,
+ static void PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence, Age age,
MarkingParity parity);
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
@@ -5591,6 +5772,12 @@ class Map: public HeapObject {
Map* transitioned_map);
inline void SetTransition(int transition_index, Map* target);
inline Map* GetTransition(int transition_index);
+
+ static Handle<TransitionArray> AddTransition(Handle<Map> map,
+ Handle<Name> key,
+ Handle<Map> target,
+ SimpleTransitionFlag flag);
+
MUST_USE_RESULT inline MaybeObject* AddTransition(Name* key,
Map* target,
SimpleTransitionFlag flag);
@@ -5611,16 +5798,16 @@ class Map: public HeapObject {
int target_number_of_fields,
int target_inobject,
int target_unused);
+ static Handle<Map> GeneralizeAllFieldRepresentations(
+ Handle<Map> map,
+ Representation new_representation);
static Handle<Map> GeneralizeRepresentation(
Handle<Map> map,
int modify_index,
Representation new_representation,
StoreMode store_mode);
- MUST_USE_RESULT MaybeObject* GeneralizeRepresentation(
- int modify_index,
- Representation representation,
- StoreMode store_mode);
- MUST_USE_RESULT MaybeObject* CopyGeneralizeAllRepresentations(
+ static Handle<Map> CopyGeneralizeAllRepresentations(
+ Handle<Map> map,
int modify_index,
StoreMode store_mode,
PropertyAttributes attributes,
@@ -5791,18 +5978,24 @@ class Map: public HeapObject {
// descriptor array of the map. Returns NULL if no updated map is found.
Map* CurrentMapForDeprecated();
+ static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
static Handle<Map> CopyDropDescriptors(Handle<Map> map);
MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
+ static Handle<Map> CopyReplaceDescriptors(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ TransitionFlag flag,
+ Handle<Name> name);
MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors(
DescriptorArray* descriptors,
TransitionFlag flag,
Name* name = NULL,
SimpleTransitionFlag simple_flag = FULL_TRANSITION);
- MUST_USE_RESULT MaybeObject* CopyInstallDescriptors(
+ static Handle<Map> CopyInstallDescriptors(
+ Handle<Map> map,
int new_descriptor,
- DescriptorArray* descriptors);
+ Handle<DescriptorArray> descriptors);
MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors,
Descriptor* descriptor);
MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
@@ -5818,13 +6011,12 @@ class Map: public HeapObject {
MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
TransitionFlag flag);
- MUST_USE_RESULT MaybeObject* CopyForObserved();
+
+ static Handle<Map> CopyForObserved(Handle<Map> map);
static Handle<Map> CopyNormalized(Handle<Map> map,
PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing);
- MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
- NormalizedMapSharingMode sharing);
inline void AppendDescriptor(Descriptor* desc,
const DescriptorArray::WhitenessWitness&);
@@ -6221,9 +6413,6 @@ class Script: public Struct {
//
// Installation of ids for the selected builtin functions is handled
// by the bootstrapper.
-//
-// NOTE: Order is important: math functions should be at the end of
-// the list and MathFloor should be the first math function.
#define FUNCTIONS_WITH_ID_LIST(V) \
V(Array.prototype, push, ArrayPush) \
V(Array.prototype, pop, ArrayPop) \
@@ -6258,8 +6447,7 @@ enum BuiltinFunctionId {
#undef DECLARE_FUNCTION_ID
// Fake id for a special case of Math.pow. Note, it continues the
// list of math functions.
- kMathPowHalf,
- kFirstMathFunctionId = kMathFloor
+ kMathPowHalf
};
@@ -7841,14 +8029,24 @@ class AllocationSite: public Struct {
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
DECL_ACCESSORS(transition_info, Object)
+ // nested_site threads a list of sites that represent nested literals
+ // walked in a particular order. So [[1, 2], 1, 2] will have one
+ // nested_site, but [[1, 2], 3, [4]] will have a list of two.
+ DECL_ACCESSORS(nested_site, Object)
+ DECL_ACCESSORS(dependent_code, DependentCode)
DECL_ACCESSORS(weak_next, Object)
- void Initialize() {
- SetElementsKind(GetInitialFastElementsKind());
+ inline void Initialize();
+
+ bool HasNestedSites() {
+ return nested_site()->IsAllocationSite();
}
+ // This method is expensive; it should only be called for reporting.
+ bool IsNestedSite();
+
ElementsKind GetElementsKind() {
- ASSERT(!IsLiteralSite());
+ ASSERT(!SitePointsToLiteral());
return static_cast<ElementsKind>(Smi::cast(transition_info())->value());
}
@@ -7856,11 +8054,11 @@ class AllocationSite: public Struct {
set_transition_info(Smi::FromInt(static_cast<int>(kind)));
}
- bool IsLiteralSite() {
+ bool SitePointsToLiteral() {
// If transition_info is a smi, then it represents an ElementsKind
// for a constructed array. Otherwise, it must be a boilerplate
- // for an array literal
- return transition_info()->IsJSArray();
+ // for an object or array literal.
+ return transition_info()->IsJSArray() || transition_info()->IsJSObject();
}
DECLARE_PRINTER(AllocationSite)
@@ -7873,11 +8071,13 @@ class AllocationSite: public Struct {
static inline bool CanTrack(InstanceType type);
static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
- static const int kWeakNextOffset = kTransitionInfoOffset + kPointerSize;
+ static const int kNestedSiteOffset = kTransitionInfoOffset + kPointerSize;
+ static const int kDependentCodeOffset = kNestedSiteOffset + kPointerSize;
+ static const int kWeakNextOffset = kDependentCodeOffset + kPointerSize;
static const int kSize = kWeakNextOffset + kPointerSize;
typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
- kTransitionInfoOffset + kPointerSize,
+ kDependentCodeOffset + kPointerSize,
kSize> BodyDescriptor;
private:
@@ -7902,7 +8102,8 @@ class AllocationMemento: public Struct {
DECLARE_VERIFIER(AllocationMemento)
// Returns NULL if no AllocationMemento is available for object.
- static AllocationMemento* FindForJSObject(JSObject* object);
+ static AllocationMemento* FindForJSObject(JSObject* object,
+ bool in_GC = false);
static inline AllocationMemento* cast(Object* obj);
private:
@@ -9018,9 +9219,17 @@ class PropertyCell: public Cell {
// of the cell's current type and the value's type. If the change causes
// a change of the type of the cell's contents, code dependent on the cell
// will be deoptimized.
- MUST_USE_RESULT MaybeObject* SetValueInferType(
- Object* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ static void SetValueInferType(Handle<PropertyCell> cell,
+ Handle<Object> value);
+
+ // Computes the new type of the cell's contents for the given value, but
+ // without actually modifying the 'type' field.
+ static Handle<Type> UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value);
+
+ void AddDependentCompilationInfo(CompilationInfo* info);
+
+ void AddDependentCode(Handle<Code> code);
// Casting.
static inline PropertyCell* cast(Object* obj);
@@ -9045,13 +9254,6 @@ class PropertyCell: public Cell {
kSize,
kSize> BodyDescriptor;
- void AddDependentCompilationInfo(CompilationInfo* info);
-
- void AddDependentCode(Handle<Code> code);
-
- static Type* UpdateType(Handle<PropertyCell> cell,
- Handle<Object> value);
-
private:
DECL_ACCESSORS(type_raw, Object)
DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyCell);
@@ -9070,9 +9272,6 @@ class JSProxy: public JSReceiver {
// Casting.
static inline JSProxy* cast(Object* obj);
- bool HasPropertyWithHandler(Name* name);
- bool HasElementWithHandler(uint32_t index);
-
MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
Object* receiver,
Name* name);
@@ -9080,21 +9279,15 @@ class JSProxy: public JSReceiver {
Object* receiver,
uint32_t index);
- MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
- JSReceiver* receiver,
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
// If the handler defines an accessor property with a setter, invoke it.
// If it defines an accessor property without a setter, or a data property
// that is read-only, throw. In all these cases set '*done' to true,
// otherwise set it to false.
- MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypesWithHandler(
- JSReceiver* receiver,
- Name* name,
- Object* value,
+ static Handle<Object> SetPropertyViaPrototypesWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done);
@@ -9142,12 +9335,21 @@ class JSProxy: public JSReceiver {
private:
friend class JSReceiver;
+ static Handle<Object> SetPropertyWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
static Handle<Object> SetElementWithHandler(Handle<JSProxy> proxy,
Handle<JSReceiver> receiver,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode);
+ static bool HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name);
+ static bool HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index);
+
static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> proxy,
Handle<Name> name,
DeleteMode mode);
@@ -10157,6 +10359,9 @@ class ObjectVisitor BASE_EMBEDDED {
// [start, end). Any or all of the values may be modified on return.
virtual void VisitPointers(Object** start, Object** end) = 0;
+ // Handy shorthand for visiting a single pointer.
+ virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+
// To allow lazy clearing of inline caches, the visitor has
// a rich interface for iterating over Code objects.
@@ -10185,22 +10390,14 @@ class ObjectVisitor BASE_EMBEDDED {
// about the code's age.
virtual void VisitCodeAgeSequence(RelocInfo* rinfo);
- // Handy shorthand for visiting a single pointer.
- virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
-
// Visit pointer embedded into a code object.
virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
- // Visits a contiguous arrays of external references (references to the C++
- // heap) in the half-open range [start, end). Any or all of the values
- // may be modified on return.
- virtual void VisitExternalReferences(Address* start, Address* end) {}
-
+ // Visits an external reference embedded into a code object.
virtual void VisitExternalReference(RelocInfo* rinfo);
- inline void VisitExternalReference(Address* p) {
- VisitExternalReferences(p, p + 1);
- }
+ // Visits an external reference. The value may be modified on return.
+ virtual void VisitExternalReference(Address* p) {}
// Visits a handle that has an embedder-assigned class ID.
virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index 085143d998..e9c0254522 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -29,6 +29,7 @@
#include "v8.h"
+#include "full-codegen.h"
#include "hydrogen.h"
#include "isolate.h"
#include "v8threads.h"
@@ -36,6 +37,19 @@
namespace v8 {
namespace internal {
+OptimizingCompilerThread::~OptimizingCompilerThread() {
+ ASSERT_EQ(0, input_queue_length_);
+ DeleteArray(input_queue_);
+ if (FLAG_concurrent_osr) {
+#ifdef DEBUG
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ CHECK_EQ(NULL, osr_buffer_[i]);
+ }
+#endif
+ DeleteArray(osr_buffer_);
+ }
+}
+
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
@@ -74,7 +88,6 @@ void OptimizingCompilerThread::Run() {
{ AllowHandleDereference allow_handle_dereference;
FlushInputQueue(true);
}
- Release_Store(&queue_length_, static_cast<AtomicWord>(0));
Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
stop_semaphore_.Signal();
// Return to start of consumer loop.
@@ -93,99 +106,125 @@ void OptimizingCompilerThread::Run() {
}
+RecompileJob* OptimizingCompilerThread::NextInput() {
+ LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
+ if (input_queue_length_ == 0) return NULL;
+ RecompileJob* job = input_queue_[InputQueueIndex(0)];
+ ASSERT_NE(NULL, job);
+ input_queue_shift_ = InputQueueIndex(1);
+ input_queue_length_--;
+ return job;
+}
+
+
void OptimizingCompilerThread::CompileNext() {
- OptimizingCompiler* optimizing_compiler = NULL;
- bool result = input_queue_.Dequeue(&optimizing_compiler);
- USE(result);
- ASSERT(result);
- Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
+ RecompileJob* job = NextInput();
+ ASSERT_NE(NULL, job);
// The function may have already been optimized by OSR. Simply continue.
- OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
+ RecompileJob::Status status = job->OptimizeGraph();
USE(status); // Prevent an unused-variable error in release mode.
- ASSERT(status != OptimizingCompiler::FAILED);
+ ASSERT(status != RecompileJob::FAILED);
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
// are always also queued.
- if (!optimizing_compiler->info()->osr_ast_id().IsNone()) {
- ASSERT(FLAG_concurrent_osr);
- LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- osr_candidates_.RemoveElement(optimizing_compiler);
- ready_for_osr_.Add(optimizing_compiler);
- } else {
- output_queue_.Enqueue(optimizing_compiler);
- isolate_->stack_guard()->RequestInstallCode();
+ output_queue_.Enqueue(job);
+ isolate_->stack_guard()->RequestInstallCode();
+}
+
+
+static void DisposeRecompileJob(RecompileJob* job,
+ bool restore_function_code) {
+ // The recompile job is allocated in the CompilationInfo's zone.
+ CompilationInfo* info = job->info();
+ if (restore_function_code) {
+ if (info->is_osr()) {
+ if (!job->IsWaitingForInstall()) BackEdgeTable::RemoveStackCheck(info);
+ } else {
+ Handle<JSFunction> function = info->closure();
+ function->ReplaceCode(function->shared()->code());
+ }
}
+ delete info;
}
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
- OptimizingCompiler* optimizing_compiler;
- // The optimizing compiler is allocated in the CompilationInfo's zone.
- while (input_queue_.Dequeue(&optimizing_compiler)) {
+ RecompileJob* job;
+ while ((job = NextInput())) {
// This should not block, since we have one signal on the input queue
// semaphore corresponding to each element in the input queue.
input_queue_semaphore_.Wait();
- CompilationInfo* info = optimizing_compiler->info();
- if (restore_function_code) {
- Handle<JSFunction> function = info->closure();
- function->ReplaceCode(function->shared()->code());
+ // OSR jobs are dealt with separately.
+ if (!job->info()->is_osr()) {
+ DisposeRecompileJob(job, restore_function_code);
}
- delete info;
}
}
void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
- OptimizingCompiler* optimizing_compiler;
- // The optimizing compiler is allocated in the CompilationInfo's zone.
- while (output_queue_.Dequeue(&optimizing_compiler)) {
- CompilationInfo* info = optimizing_compiler->info();
- if (restore_function_code) {
- Handle<JSFunction> function = info->closure();
- function->ReplaceCode(function->shared()->code());
+ RecompileJob* job;
+ while (output_queue_.Dequeue(&job)) {
+ // OSR jobs are dealt with separately.
+ if (!job->info()->is_osr()) {
+ DisposeRecompileJob(job, restore_function_code);
}
- delete info;
}
+}
+
- osr_candidates_.Clear();
- RemoveStaleOSRCandidates(0);
+void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ if (osr_buffer_[i] != NULL) {
+ DisposeRecompileJob(osr_buffer_[i], restore_function_code);
+ osr_buffer_[i] = NULL;
+ }
+ }
}
void OptimizingCompilerThread::Flush() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
+ if (FLAG_block_concurrent_recompilation) Unblock();
input_queue_semaphore_.Signal();
stop_semaphore_.Wait();
FlushOutputQueue(true);
+ if (FLAG_concurrent_osr) FlushOsrBuffer(true);
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Flushed concurrent recompilation queues.\n");
+ }
}
void OptimizingCompilerThread::Stop() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
+ if (FLAG_block_concurrent_recompilation) Unblock();
input_queue_semaphore_.Signal();
stop_semaphore_.Wait();
if (FLAG_concurrent_recompilation_delay != 0) {
- // Barrier when loading queue length is not necessary since the write
- // happens in CompileNext on the same thread.
- // This is used only for testing.
- while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
+ // At this point the optimizing compiler thread's event loop has stopped.
+ // There is no need for a mutex when reading input_queue_length_.
+ while (input_queue_length_ > 0) CompileNext();
InstallOptimizedFunctions();
} else {
FlushInputQueue(false);
FlushOutputQueue(false);
}
+ if (FLAG_concurrent_osr) FlushOsrBuffer(false);
+
if (FLAG_trace_concurrent_recompilation) {
double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
}
- if (FLAG_trace_osr && FLAG_concurrent_osr) {
+ if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
+ FLAG_concurrent_osr) {
PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
}
@@ -196,60 +235,96 @@ void OptimizingCompilerThread::Stop() {
void OptimizingCompilerThread::InstallOptimizedFunctions() {
ASSERT(!IsOptimizerThread());
HandleScope handle_scope(isolate_);
- OptimizingCompiler* compiler;
- while (true) {
- if (!output_queue_.Dequeue(&compiler)) return;
- Compiler::InstallOptimizedCode(compiler);
- }
- // Remove the oldest OSR candidates that are ready so that we
- // only have limited number of them waiting.
- if (FLAG_concurrent_osr) RemoveStaleOSRCandidates();
+ RecompileJob* job;
+ while (output_queue_.Dequeue(&job)) {
+ CompilationInfo* info = job->info();
+ if (info->is_osr()) {
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - ");
+ info->closure()->PrintName();
+ PrintF(" is ready for install and entry at AST id %d]\n",
+ info->osr_ast_id().ToInt());
+ }
+ job->WaitForInstall();
+ BackEdgeTable::RemoveStackCheck(info);
+ } else {
+ Compiler::InstallOptimizedCode(job);
+ }
+ }
}
-void OptimizingCompilerThread::QueueForOptimization(
- OptimizingCompiler* optimizing_compiler) {
+void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
ASSERT(IsQueueAvailable());
ASSERT(!IsOptimizerThread());
- Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
- if (optimizing_compiler->info()->osr_ast_id().IsNone()) {
- optimizing_compiler->info()->closure()->MarkInRecompileQueue();
- } else {
- LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- osr_candidates_.Add(optimizing_compiler);
+ CompilationInfo* info = job->info();
+ if (info->is_osr()) {
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Queueing ");
+ info->closure()->PrintName();
+ PrintF(" for concurrent on-stack replacement.\n");
+ }
osr_attempts_++;
+ BackEdgeTable::AddStackCheck(info);
+ AddToOsrBuffer(job);
+ // Add job to the front of the input queue.
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ ASSERT_LT(input_queue_length_, input_queue_capacity_);
+ // Move shift_ back by one.
+ input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
+ input_queue_[InputQueueIndex(0)] = job;
+ input_queue_length_++;
+ } else {
+ info->closure()->MarkInRecompileQueue();
+ // Add job to the back of the input queue.
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ ASSERT_LT(input_queue_length_, input_queue_capacity_);
+ input_queue_[InputQueueIndex(input_queue_length_)] = job;
+ input_queue_length_++;
+ }
+ if (FLAG_block_concurrent_recompilation) {
+ blocked_jobs_++;
+ } else {
+ input_queue_semaphore_.Signal();
+ }
+}
+
+
+void OptimizingCompilerThread::Unblock() {
+ ASSERT(!IsOptimizerThread());
+ while (blocked_jobs_ > 0) {
+ input_queue_semaphore_.Signal();
+ blocked_jobs_--;
}
- input_queue_.Enqueue(optimizing_compiler);
- input_queue_semaphore_.Signal();
}
-OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate(
+RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
Handle<JSFunction> function, uint32_t osr_pc_offset) {
ASSERT(!IsOptimizerThread());
- OptimizingCompiler* result = NULL;
- { LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- for (int i = 0; i < ready_for_osr_.length(); i++) {
- if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
- osr_hits_++;
- result = ready_for_osr_.Remove(i);
- break;
- }
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL &&
+ current->IsWaitingForInstall() &&
+ current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ osr_hits_++;
+ osr_buffer_[i] = NULL;
+ return current;
}
}
- RemoveStaleOSRCandidates();
- return result;
+ return NULL;
}
bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
uint32_t osr_pc_offset) {
ASSERT(!IsOptimizerThread());
- LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- for (int i = 0; i < osr_candidates_.length(); i++) {
- if (osr_candidates_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
- return true;
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL &&
+ current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ return !current->IsWaitingForInstall();
}
}
return false;
@@ -258,30 +333,39 @@ bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
ASSERT(!IsOptimizerThread());
- LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- for (int i = 0; i < osr_candidates_.length(); i++) {
- if (*osr_candidates_[i]->info()->closure() == function) {
- return true;
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL && *current->info()->closure() == function) {
+ return !current->IsWaitingForInstall();
}
}
return false;
}
-void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) {
+void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
ASSERT(!IsOptimizerThread());
- LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- while (ready_for_osr_.length() > limit) {
- OptimizingCompiler* compiler = ready_for_osr_.Remove(0);
- CompilationInfo* throw_away = compiler->info();
+ // Find the next slot that is empty or has a stale job.
+ while (true) {
+ RecompileJob* stale = osr_buffer_[osr_buffer_cursor_];
+ if (stale == NULL || stale->IsWaitingForInstall()) break;
+ osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
+ }
+
+ // Add to found slot and dispose the evicted job.
+ RecompileJob* evicted = osr_buffer_[osr_buffer_cursor_];
+ if (evicted != NULL) {
+ ASSERT(evicted->IsWaitingForInstall());
+ CompilationInfo* info = evicted->info();
if (FLAG_trace_osr) {
PrintF("[COSR - Discarded ");
- throw_away->closure()->PrintName();
- PrintF(", AST id %d]\n",
- throw_away->osr_ast_id().ToInt());
+ info->closure()->PrintName();
+ PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
}
- delete throw_away;
+ DisposeRecompileJob(evicted, false);
}
+ osr_buffer_[osr_buffer_cursor_] = job;
+ osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}
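A stripped-down model of the eviction policy AddToOsrBuffer() implements above, with RecompileJob reduced to a single flag; inserting NULL performs the same walk, which is how the header's AgeBufferedOsrJobs() ages the buffer on every GC:

    #include <cstddef>

    struct Job { bool waiting_for_install; };

    const int kCapacity = 8;
    Job* buffer[kCapacity] = {NULL};
    int cursor = 0;

    void Dispose(Job* job) { delete job; }

    // Walk past slots whose jobs are still compiling; the first empty or
    // stale (waiting-for-install) slot becomes the victim. Dispose any
    // evicted job, store the new one, and advance the cursor.
    void AddToOsrBuffer(Job* job) {
      while (true) {
        Job* stale = buffer[cursor];
        if (stale == NULL || stale->waiting_for_install) break;
        cursor = (cursor + 1) % kCapacity;
      }
      if (buffer[cursor] != NULL) Dispose(buffer[cursor]);
      buffer[cursor] = job;
      cursor = (cursor + 1) % kCapacity;
    }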
diff --git a/deps/v8/src/optimizing-compiler-thread.h b/deps/v8/src/optimizing-compiler-thread.h
index d1ed6a2c59..754aecebf5 100644
--- a/deps/v8/src/optimizing-compiler-thread.h
+++ b/deps/v8/src/optimizing-compiler-thread.h
@@ -40,7 +40,7 @@ namespace v8 {
namespace internal {
class HOptimizedGraphBuilder;
-class OptimizingCompiler;
+class RecompileJob;
class SharedFunctionInfo;
class OptimizingCompilerThread : public Thread {
@@ -53,38 +53,47 @@ class OptimizingCompilerThread : public Thread {
isolate_(isolate),
stop_semaphore_(0),
input_queue_semaphore_(0),
- osr_candidates_(2),
- ready_for_osr_(2),
+ input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
+ input_queue_length_(0),
+ input_queue_shift_(0),
+ osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
+ osr_buffer_cursor_(0),
osr_hits_(0),
- osr_attempts_(0) {
+ osr_attempts_(0),
+ blocked_jobs_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
- NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+ input_queue_ = NewArray<RecompileJob*>(input_queue_capacity_);
+ if (FLAG_concurrent_osr) {
+ // Allocate and mark OSR buffer slots as empty.
+ osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_capacity_);
+ for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
+ }
}
- ~OptimizingCompilerThread() {}
+
+ ~OptimizingCompilerThread();
void Run();
void Stop();
void Flush();
- void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
+ void QueueForOptimization(RecompileJob* optimizing_compiler);
+ void Unblock();
void InstallOptimizedFunctions();
- OptimizingCompiler* FindReadyOSRCandidate(Handle<JSFunction> function,
- uint32_t osr_pc_offset);
+ RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
+ uint32_t osr_pc_offset);
bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
bool IsQueuedForOSR(JSFunction* function);
inline bool IsQueueAvailable() {
- // We don't need a barrier since we have a data dependency right
- // after.
- Atomic32 current_length = NoBarrier_Load(&queue_length_);
-
- // This can be queried only from the execution thread.
- ASSERT(!IsOptimizerThread());
- // Since only the execution thread increments queue_length_ and
- // only one thread can run inside an Isolate at one time, a direct
- // doesn't introduce a race -- queue_length_ may decreased in
- // meantime, but not increased.
- return (current_length < FLAG_concurrent_recompilation_queue_length);
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ return input_queue_length_ < input_queue_capacity_;
+ }
+
+ inline void AgeBufferedOsrJobs() {
+ // Advance cursor of the cyclic buffer to next empty slot or stale OSR job.
+ // Dispose said OSR job in the latter case. Calling this on every GC
+ // should make sure that we do not hold onto stale jobs indefinitely.
+ AddToOsrBuffer(NULL);
}
#ifdef DEBUG
@@ -94,13 +103,22 @@ class OptimizingCompilerThread : public Thread {
private:
enum StopFlag { CONTINUE, STOP, FLUSH };
- // Remove the oldest OSR candidates that are ready so that we
- // only have |limit| left waiting.
- void RemoveStaleOSRCandidates(int limit = kReadyForOSRLimit);
-
void FlushInputQueue(bool restore_function_code);
void FlushOutputQueue(bool restore_function_code);
+ void FlushOsrBuffer(bool restore_function_code);
void CompileNext();
+ RecompileJob* NextInput();
+
+ // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
+ // Tasks evicted from the cyclic buffer are discarded.
+ void AddToOsrBuffer(RecompileJob* compiler);
+
+ inline int InputQueueIndex(int i) {
+ int result = (i + input_queue_shift_) % input_queue_capacity_;
+ ASSERT_LE(0, result);
+ ASSERT_LT(result, input_queue_capacity_);
+ return result;
+ }
#ifdef DEBUG
int thread_id_;
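
InputQueueIndex() maps a logical queue offset to a physical array slot by adding the current shift modulo the capacity, which is what lets the consumer pop from the front by bumping input_queue_shift_ instead of moving elements. A tiny sanity check of that arithmetic (values are hypothetical):

    #include <cassert>

    int InputQueueIndex(int i, int shift, int capacity) {
      int result = (i + shift) % capacity;
      assert(0 <= result && result < capacity);
      return result;
    }

    int main() {
      // Capacity 8, oldest element currently at physical slot 5.
      assert(InputQueueIndex(0, 5, 8) == 5);  // front of the queue
      assert(InputQueueIndex(3, 5, 8) == 0);  // wraps past the end
      assert(InputQueueIndex(7, 5, 8) == 4);  // last logical slot
      return 0;
    }
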
@@ -111,25 +129,29 @@ class OptimizingCompilerThread : public Thread {
Semaphore stop_semaphore_;
Semaphore input_queue_semaphore_;
- // Queue of incoming recompilation tasks (including OSR).
- UnboundQueue<OptimizingCompiler*> input_queue_;
+ // Circular queue of incoming recompilation tasks (including OSR).
+ RecompileJob** input_queue_;
+ int input_queue_capacity_;
+ int input_queue_length_;
+ int input_queue_shift_;
+ Mutex input_queue_mutex_;
+
// Queue of recompilation tasks ready to be installed (excluding OSR).
- UnboundQueue<OptimizingCompiler*> output_queue_;
- // List of all OSR related recompilation tasks (both incoming and ready ones).
- List<OptimizingCompiler*> osr_candidates_;
- // List of recompilation tasks ready for OSR.
- List<OptimizingCompiler*> ready_for_osr_;
+ UnboundQueue<RecompileJob*> output_queue_;
+
+ // Cyclic buffer of recompilation tasks for OSR.
+ RecompileJob** osr_buffer_;
+ int osr_buffer_capacity_;
+ int osr_buffer_cursor_;
volatile AtomicWord stop_thread_;
- volatile Atomic32 queue_length_;
TimeDelta time_spent_compiling_;
TimeDelta time_spent_total_;
- Mutex osr_list_mutex_;
int osr_hits_;
int osr_attempts_;
- static const int kReadyForOSRLimit = 4;
+ int blocked_jobs_;
};
} } // namespace v8::internal
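
AgeBufferedOsrJobs() passes NULL to AddToOsrBuffer(), so every GC advances the cursor and disposes at most one stale OSR job that was never picked up for on-stack replacement. AddToOsrBuffer() itself is defined in optimizing-compiler-thread.cc, which is not shown in this diff; the following is only a rough standalone sketch of one plausible eviction step:

    #include <cstddef>
    #include <cstdio>

    struct RecompileJob { int id; };  // stand-in for v8::internal::RecompileJob

    // Assumed cleanup hook; the real thread restores function code and
    // deletes the job.
    static void DisposeJob(RecompileJob* job) {
      std::printf("evicting stale OSR job %d\n", job->id);
      delete job;
    }

    // Sketch: insert |job| (or NULL just to age the buffer) at the
    // cursor, evicting whatever stale job still occupied the slot.
    static void AddToOsrBuffer(RecompileJob** buffer, int capacity,
                               int* cursor, RecompileJob* job) {
      if (buffer[*cursor] != NULL) DisposeJob(buffer[*cursor]);
      buffer[*cursor] = job;
      *cursor = (*cursor + 1) % capacity;
    }

    int main() {
      RecompileJob* buffer[4] = {NULL, NULL, NULL, NULL};
      int cursor = 0;
      AddToOsrBuffer(buffer, 4, &cursor, new RecompileJob{1});
      // Calling with NULL on every GC walks the ring and evicts stale jobs.
      for (int i = 0; i < 4; i++) AddToOsrBuffer(buffer, 4, &cursor, NULL);
      return 0;
    }
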
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 05ae11e429..d84649d86b 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -536,7 +536,8 @@ Parser::FunctionState::~FunctionState() {
// Implementation of Parser
Parser::Parser(CompilationInfo* info)
- : isolate_(info->isolate()),
+ : ParserBase(&scanner_, info->isolate()->stack_guard()->real_climit()),
+ isolate_(info->isolate()),
symbol_cache_(0, info->zone()),
script_(info->script()),
scanner_(isolate_->unicode_cache()),
@@ -548,11 +549,6 @@ Parser::Parser(CompilationInfo* info)
extension_(info->extension()),
pre_parse_data_(NULL),
fni_(NULL),
- allow_natives_syntax_(false),
- allow_lazy_(false),
- allow_generators_(false),
- allow_for_of_(false),
- stack_overflow_(false),
parenthesized_function_(false),
zone_(info->zone()),
info_(info) {
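
The constructor change above hands the scanner and the real stack limit to ParserBase, which now owns the allow_* syntax flags and the stack_overflow_ flag that the deleted initializers used to set, so Parser and PreParser share one implementation. A simplified sketch of the shape such a base might take (the actual class lives in the preparser headers; the members below are inferred from the deleted lines):

    #include <cstdint>

    class ParserBase {
     public:
      explicit ParserBase(uintptr_t stack_limit)
          : allow_natives_syntax_(false),
            allow_lazy_(false),
            allow_generators_(false),
            allow_for_of_(false),
            stack_limit_(stack_limit),
            stack_overflow_(false) {}

      bool allow_lazy() const { return allow_lazy_; }
      void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
      bool stack_overflow() const { return stack_overflow_; }

     protected:
      // Called on deep recursion; records overflow instead of crashing.
      // The stack grows down, so a pointer below the limit means overflow.
      void CheckStackLimit(uintptr_t stack_pointer) {
        if (stack_pointer < stack_limit_) stack_overflow_ = true;
      }

      bool allow_natives_syntax_;
      bool allow_lazy_;
      bool allow_generators_;
      bool allow_for_of_;
      uintptr_t stack_limit_;
      bool stack_overflow_;
    };
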
@@ -569,7 +565,9 @@ Parser::Parser(CompilationInfo* info)
FunctionLiteral* Parser::ParseProgram() {
- HistogramTimerScope timer_scope(isolate()->counters()->parse());
+ // TODO(bmeurer): We temporarily need to pass allow_nesting = true here,
+ // see comment for HistogramTimerScope class.
+ HistogramTimerScope timer_scope(isolate()->counters()->parse(), true);
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
ElapsedTimer timer;
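
The TODO above refers to ParseProgram() re-entering while the parse histogram timer is already running, which the new allow_nesting argument opts into. A hedged sketch of an RAII timer scope with that behavior (illustrative only, not V8's HistogramTimerScope):

    #include <cassert>
    #include <chrono>

    class Timer {
     public:
      Timer() : running_(false) {}
      bool running() const { return running_; }
      void Start() { running_ = true; start_ = Clock::now(); }
      void Stop() { running_ = false; /* record Clock::now() - start_ */ }
     private:
      typedef std::chrono::steady_clock Clock;
      bool running_;
      Clock::time_point start_;
    };

    // Starts the timer unless it is already running; in that case the
    // scope only participates if nesting was explicitly allowed.
    class TimerScope {
     public:
      TimerScope(Timer* timer, bool allow_nesting = false)
          : timer_(timer), owns_(false) {
        if (!timer_->running()) {
          timer_->Start();
          owns_ = true;
        } else {
          assert(allow_nesting);  // re-entry must be opted into
        }
      }
      ~TimerScope() { if (owns_) timer_->Stop(); }
     private:
      Timer* timer_;
      bool owns_;
    };
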
@@ -652,10 +650,10 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
top_scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
- int beg_loc = scanner().location().beg_pos;
+ int beg_pos = scanner().location().beg_pos;
ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
if (ok && !top_scope_->is_classic_mode()) {
- CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
+ CheckOctalLiteral(beg_pos, scanner().location().end_pos, &ok);
}
if (ok && is_extended_mode()) {
@@ -685,11 +683,12 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral::ANONYMOUS_EXPRESSION,
FunctionLiteral::kGlobalOrEval,
FunctionLiteral::kNotParenthesized,
- FunctionLiteral::kNotGenerator);
+ FunctionLiteral::kNotGenerator,
+ 0);
result->set_ast_properties(factory()->visitor()->ast_properties());
result->set_dont_optimize_reason(
factory()->visitor()->dont_optimize_reason());
- } else if (stack_overflow_) {
+ } else if (stack_overflow()) {
isolate()->StackOverflow();
}
}
@@ -786,7 +785,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
ASSERT(target_stack_ == NULL);
if (result == NULL) {
- if (stack_overflow_) isolate()->StackOverflow();
+ if (stack_overflow()) isolate()->StackOverflow();
} else {
Handle<String> inferred_name(shared_info->inferred_name());
result->set_inferred_name(inferred_name);
@@ -984,6 +983,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
// ModuleDeclaration:
// 'module' Identifier Module
+ int pos = peek_position();
Handle<String> name = ParseIdentifier(CHECK_OK);
#ifdef DEBUG
@@ -994,7 +994,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
Module* module = ParseModule(CHECK_OK);
VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
Declaration* declaration =
- factory()->NewModuleDeclaration(proxy, module, top_scope_);
+ factory()->NewModuleDeclaration(proxy, module, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
#ifdef DEBUG
@@ -1009,9 +1009,9 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
if (names) names->Add(name, zone());
if (module->body() == NULL)
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(pos);
else
- return factory()->NewModuleStatement(proxy, module->body());
+ return factory()->NewModuleStatement(proxy, module->body(), pos);
}
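
From this hunk onward the recurring pattern is to capture the start position before consuming the construct's first token and thread it into the factory call, so AST nodes are created with their position instead of having it patched in later. The two helpers differ only in which token they report; a toy illustration of that contract (the real helpers read Scanner locations):

    #include <cassert>

    // Toy scanner with just enough state to show the contract of the
    // two position helpers used throughout these hunks.
    struct ToyScanner {
      int current_beg_pos;  // start of the token just consumed
      int peek_beg_pos;     // start of the next, not-yet-consumed token
    };

    static int position(const ToyScanner& s) { return s.current_beg_pos; }
    static int peek_position(const ToyScanner& s) { return s.peek_beg_pos; }

    int main() {
      // Source "return x": 'return' at offset 0, 'x' at offset 7.
      ToyScanner s = {0, 7};
      // A production that starts at the next token captures
      // peek_position() before consuming it; one whose keyword is
      // already consumed uses position().
      assert(position(s) == 0);
      assert(peek_position(s) == 7);
      return 0;
    }
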
@@ -1046,8 +1046,9 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
// Module:
// '{' ModuleElement '}'
+ int pos = peek_position();
// Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(NULL, 16, false);
+ Block* body = factory()->NewBlock(NULL, 16, false, RelocInfo::kNoPosition);
#ifdef DEBUG
if (FLAG_print_interface_details) PrintF("# Literal ");
#endif
@@ -1092,7 +1093,7 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
ASSERT(*ok);
interface->Freeze(ok);
ASSERT(*ok);
- return factory()->NewModuleLiteral(body, interface);
+ return factory()->NewModuleLiteral(body, interface, pos);
}
@@ -1101,6 +1102,7 @@ Module* Parser::ParseModulePath(bool* ok) {
// Identifier
// ModulePath '.' Identifier
+ int pos = peek_position();
Module* result = ParseModuleVariable(CHECK_OK);
while (Check(Token::PERIOD)) {
Handle<String> name = ParseIdentifierName(CHECK_OK);
@@ -1108,7 +1110,7 @@ Module* Parser::ParseModulePath(bool* ok) {
if (FLAG_print_interface_details)
PrintF("# Path .%s ", name->ToAsciiArray());
#endif
- Module* member = factory()->NewModulePath(result, name);
+ Module* member = factory()->NewModulePath(result, name, pos);
result->interface()->Add(name, member->interface(), zone(), ok);
if (!*ok) {
#ifdef DEBUG
@@ -1134,6 +1136,7 @@ Module* Parser::ParseModuleVariable(bool* ok) {
// ModulePath:
// Identifier
+ int pos = peek_position();
Handle<String> name = ParseIdentifier(CHECK_OK);
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1143,7 +1146,7 @@ Module* Parser::ParseModuleVariable(bool* ok) {
factory(), name, Interface::NewModule(zone()),
scanner().location().beg_pos);
- return factory()->NewModuleVariable(proxy);
+ return factory()->NewModuleVariable(proxy, pos);
}
@@ -1151,6 +1154,7 @@ Module* Parser::ParseModuleUrl(bool* ok) {
// Module:
// String
+ int pos = peek_position();
Expect(Token::STRING, CHECK_OK);
Handle<String> symbol = GetSymbol();
@@ -1163,10 +1167,10 @@ Module* Parser::ParseModuleUrl(bool* ok) {
// Create an empty literal as long as the feature isn't finished.
USE(symbol);
Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
- Block* body = factory()->NewBlock(NULL, 1, false);
+ Block* body = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
body->set_scope(scope);
Interface* interface = scope->interface();
- Module* result = factory()->NewModuleLiteral(body, interface);
+ Module* result = factory()->NewModuleLiteral(body, interface, pos);
interface->Freeze(ok);
ASSERT(*ok);
interface->Unify(scope->interface(), zone(), ok);
@@ -1194,6 +1198,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
//
// TODO(ES6): implement destructuring ImportSpecifiers
+ int pos = peek_position();
Expect(Token::IMPORT, CHECK_OK);
ZoneStringList names(1, zone());
@@ -1211,7 +1216,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
// Generate a separate declaration for each identifier.
// TODO(ES6): once we implement destructuring, make that one declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
+ Block* block = factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
for (int i = 0; i < names.length(); ++i) {
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1232,7 +1237,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
}
VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
Declaration* declaration =
- factory()->NewImportDeclaration(proxy, module, top_scope_);
+ factory()->NewImportDeclaration(proxy, module, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
}
@@ -1256,6 +1261,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
ZoneStringList names(1, zone());
switch (peek()) {
case Token::IDENTIFIER: {
+ int pos = position();
Handle<String> name = ParseIdentifier(CHECK_OK);
// Handle 'module' as a context-sensitive keyword.
if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("module"))) {
@@ -1266,7 +1272,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
names.Add(name, zone());
}
ExpectSemicolon(CHECK_OK);
- result = factory()->NewEmptyStatement();
+ result = factory()->NewEmptyStatement(pos);
} else {
result = ParseModuleDeclaration(&names, CHECK_OK);
}
@@ -1305,7 +1311,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// TODO(rossberg): Rethink whether we actually need to store export
// declarations (for compilation?).
// ExportDeclaration* declaration =
- // factory()->NewExportDeclaration(proxy, top_scope_);
+ // factory()->NewExportDeclaration(proxy, top_scope_, position);
// top_scope_->AddDeclaration(declaration);
}
@@ -1363,10 +1369,6 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// labels can be simply ignored in all other cases; except for
// trivial labeled break statements 'label: break label' which is
// parsed into an empty statement.
-
- // Keep the source position of the statement
- int statement_pos = scanner().peek_location().beg_pos;
- Statement* stmt = NULL;
switch (peek()) {
case Token::LBRACE:
return ParseBlock(labels, ok);
@@ -1374,52 +1376,41 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
case Token::CONST: // fall through
case Token::LET:
case Token::VAR:
- stmt = ParseVariableStatement(kStatement, NULL, ok);
- break;
+ return ParseVariableStatement(kStatement, NULL, ok);
case Token::SEMICOLON:
Next();
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
case Token::IF:
- stmt = ParseIfStatement(labels, ok);
- break;
+ return ParseIfStatement(labels, ok);
case Token::DO:
- stmt = ParseDoWhileStatement(labels, ok);
- break;
+ return ParseDoWhileStatement(labels, ok);
case Token::WHILE:
- stmt = ParseWhileStatement(labels, ok);
- break;
+ return ParseWhileStatement(labels, ok);
case Token::FOR:
- stmt = ParseForStatement(labels, ok);
- break;
+ return ParseForStatement(labels, ok);
case Token::CONTINUE:
- stmt = ParseContinueStatement(ok);
- break;
+ return ParseContinueStatement(ok);
case Token::BREAK:
- stmt = ParseBreakStatement(labels, ok);
- break;
+ return ParseBreakStatement(labels, ok);
case Token::RETURN:
- stmt = ParseReturnStatement(ok);
- break;
+ return ParseReturnStatement(ok);
case Token::WITH:
- stmt = ParseWithStatement(labels, ok);
- break;
+ return ParseWithStatement(labels, ok);
case Token::SWITCH:
- stmt = ParseSwitchStatement(labels, ok);
- break;
+ return ParseSwitchStatement(labels, ok);
case Token::THROW:
- stmt = ParseThrowStatement(ok);
- break;
+ return ParseThrowStatement(ok);
case Token::TRY: {
// NOTE: It is somewhat complicated to have labels on
@@ -1427,12 +1418,10 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// one must take great care not to treat it as a
// fall-through. It is much easier just to wrap the entire
// try-statement in a statement block and put the labels there
- Block* result = factory()->NewBlock(labels, 1, false);
+ Block* result =
+ factory()->NewBlock(labels, 1, false, RelocInfo::kNoPosition);
Target target(&this->target_stack_, result);
TryStatement* statement = ParseTryStatement(CHECK_OK);
- if (statement) {
- statement->set_statement_pos(statement_pos);
- }
if (result) result->AddStatement(statement, zone());
return result;
}
@@ -1459,16 +1448,11 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
}
case Token::DEBUGGER:
- stmt = ParseDebuggerStatement(ok);
- break;
+ return ParseDebuggerStatement(ok);
default:
- stmt = ParseExpressionOrLabelledStatement(labels, ok);
+ return ParseExpressionOrLabelledStatement(labels, ok);
}
-
- // Store the source position of the statement
- if (stmt != NULL) stmt->set_statement_pos(statement_pos);
- return stmt;
}
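
With positions supplied at construction, the statement switch no longer needs the shared stmt variable and the trailing set_statement_pos() fixup; every case returns directly. A schematic sketch of the resulting flow (toy types, not the real grammar):

    #include <cassert>

    // Schematic AST node: position is a constructor argument now, not
    // something patched in afterwards via set_statement_pos().
    struct Statement {
      explicit Statement(int pos) : pos_(pos) {}
      int position() const { return pos_; }
     private:
      const int pos_;
    };

    // Each Parse* helper captures its own start position...
    static Statement* ParseIfStatement(int peek_pos) {
      return new Statement(peek_pos);
    }

    int main() {
      // ...so the dispatching switch simply returns what the helper built.
      Statement* stmt = ParseIfStatement(/*peek_position()=*/42);
      assert(stmt->position() == 42);
      delete stmt;
      return 0;
    }
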
@@ -1480,7 +1464,7 @@ VariableProxy* Parser::NewUnresolved(
// Let/const variables in harmony mode are always added to the immediately
// enclosing scope.
return DeclarationScope(mode)->NewUnresolved(
- factory(), name, interface, scanner().location().beg_pos);
+ factory(), name, interface, position());
}
@@ -1647,6 +1631,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// declaration is resolved by looking up the function through a
// callback provided by the extension.
Statement* Parser::ParseNativeDeclaration(bool* ok) {
+ int pos = peek_position();
Expect(Token::FUNCTION, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
@@ -1667,39 +1652,19 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// because of lazy compilation.
DeclarationScope(VAR)->ForceEagerCompilation();
- // Compute the function template for the native function.
- v8::Handle<v8::FunctionTemplate> fun_template =
- extension_->GetNativeFunction(v8::Utils::ToLocal(name));
- ASSERT(!fun_template.IsEmpty());
-
- // Instantiate the function and create a shared function info from it.
- Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
- const int literals = fun->NumberOfLiterals();
- Handle<Code> code = Handle<Code>(fun->shared()->code());
- Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
- bool is_generator = false;
- Handle<SharedFunctionInfo> shared =
- isolate()->factory()->NewSharedFunctionInfo(name, literals, is_generator,
- code, Handle<ScopeInfo>(fun->shared()->scope_info()));
- shared->set_construct_stub(*construct_stub);
-
- // Copy the function data to the shared function info.
- shared->set_function_data(fun->shared()->function_data());
- int parameters = fun->shared()->formal_parameter_count();
- shared->set_formal_parameter_count(parameters);
-
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
// other functions are set up when entering the surrounding scope.
VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, VAR, top_scope_);
+ factory()->NewVariableDeclaration(proxy, VAR, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
- SharedFunctionInfoLiteral* lit =
- factory()->NewSharedFunctionInfoLiteral(shared);
+ NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
+ name, extension_, RelocInfo::kNoPosition);
return factory()->NewExpressionStatement(
factory()->NewAssignment(
- Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition));
+ Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition),
+ pos);
}
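
Rather than instantiating the extension's FunctionTemplate and synthesizing a SharedFunctionInfo at parse time, the parser now emits a NativeFunctionLiteral node that records only the name and the extension, deferring materialization to compilation. A rough standalone sketch of such a deferred node (the accessors are guesses from the call site above):

    #include <string>

    struct Extension;  // stands in for v8::Extension

    // Lazy AST node: it records what to instantiate, not the
    // instantiated function. The compiler later asks the extension for
    // the FunctionTemplate and builds the SharedFunctionInfo then.
    class NativeFunctionLiteral {
     public:
      NativeFunctionLiteral(const std::string& name, Extension* extension,
                            int pos)
          : name_(name), extension_(extension), pos_(pos) {}
      const std::string& name() const { return name_; }
      Extension* extension() const { return extension_; }
      int position() const { return pos_; }
     private:
      std::string name_;
      Extension* extension_;
      int pos_;
    };
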
@@ -1710,7 +1675,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
// 'function' '*' Identifier '(' FormalParameterListopt ')'
// '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
- int function_token_position = scanner().location().beg_pos;
+ int pos = position();
bool is_generator = allow_generators() && Check(Token::MUL);
bool is_strict_reserved = false;
Handle<String> name = ParseIdentifierOrStrictReservedWord(
@@ -1718,7 +1683,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
FunctionLiteral* fun = ParseFunctionLiteral(name,
is_strict_reserved,
is_generator,
- function_token_position,
+ pos,
FunctionLiteral::DECLARATION,
CHECK_OK);
// Even if we're not at the top-level of the global or a function
@@ -1730,10 +1695,10 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR;
VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_);
+ factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
if (names) names->Add(name, zone());
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
}
@@ -1747,7 +1712,8 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
// (ECMA-262, 3rd, 12.2)
//
// Construct block expecting 16 statements.
- Block* result = factory()->NewBlock(labels, 16, false);
+ Block* result =
+ factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
Target target(&this->target_stack_, result);
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
@@ -1768,7 +1734,8 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
// '{' BlockElement* '}'
// Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(labels, 16, false);
+ Block* body =
+ factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
// Parse the statements and collect escaping labels.
@@ -1838,6 +1805,8 @@ Block* Parser::ParseVariableDeclarations(
// TODO(ES6):
// ConstBinding ::
// BindingPattern '=' AssignmentExpression
+
+ int pos = peek_position();
VariableMode mode = VAR;
// True if the binding needs initialization. 'let' and 'const' declared
// bindings are created uninitialized by their declaration nodes and
@@ -1923,7 +1892,7 @@ Block* Parser::ParseVariableDeclarations(
// is inside an initializer block, it is ignored.
//
// Create new block with one expected declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
+ Block* block = factory()->NewBlock(NULL, 1, true, pos);
int nvars = 0; // the number of variables declared
Handle<String> name;
do {
@@ -1960,7 +1929,7 @@ Block* Parser::ParseVariableDeclarations(
is_const ? Interface::NewConst() : Interface::NewValue();
VariableProxy* proxy = NewUnresolved(name, mode, interface);
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, mode, top_scope_);
+ factory()->NewVariableDeclaration(proxy, mode, top_scope_, pos);
Declare(declaration, mode != VAR, CHECK_OK);
nvars++;
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
@@ -2000,11 +1969,11 @@ Block* Parser::ParseVariableDeclarations(
Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
Expression* value = NULL;
- int position = -1;
+ int pos = -1;
// Harmony consts have non-optional initializers.
if (peek() == Token::ASSIGN || mode == CONST_HARMONY) {
Expect(Token::ASSIGN, CHECK_OK);
- position = scanner().location().beg_pos;
+ pos = position();
value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
// Don't infer if it is "a = function(){...}();"-like expression.
if (fni_ != NULL &&
@@ -2019,12 +1988,12 @@ Block* Parser::ParseVariableDeclarations(
// Record the end position of the initializer.
if (proxy->var() != NULL) {
- proxy->var()->set_initializer_position(scanner().location().end_pos);
+ proxy->var()->set_initializer_position(position());
}
// Make sure that 'const x' and 'let x' initialize 'x' to undefined.
if (value == NULL && needs_init) {
- value = GetLiteralUndefined();
+ value = GetLiteralUndefined(position());
}
// Global variable declarations must be compiled in a specific
@@ -2052,7 +2021,7 @@ Block* Parser::ParseVariableDeclarations(
ZoneList<Expression*>* arguments =
new(zone()) ZoneList<Expression*>(3, zone());
// We have at least 1 parameter.
- arguments->Add(factory()->NewLiteral(name), zone());
+ arguments->Add(factory()->NewLiteral(name, pos), zone());
CallRuntime* initialize;
if (is_const) {
@@ -2066,12 +2035,12 @@ Block* Parser::ParseVariableDeclarations(
initialize = factory()->NewCallRuntime(
isolate()->factory()->InitializeConstGlobal_string(),
Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
- arguments);
+ arguments, pos);
} else {
// Add strict mode.
// We may want to pass singleton to avoid Literal allocations.
LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(factory()->NewNumberLiteral(language_mode), zone());
+ arguments->Add(factory()->NewNumberLiteral(language_mode, pos), zone());
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -2089,11 +2058,12 @@ Block* Parser::ParseVariableDeclarations(
initialize = factory()->NewCallRuntime(
isolate()->factory()->InitializeVarGlobal_string(),
Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
- arguments);
+ arguments, pos);
}
- block->AddStatement(factory()->NewExpressionStatement(initialize),
- zone());
+ block->AddStatement(
+ factory()->NewExpressionStatement(initialize, RelocInfo::kNoPosition),
+ zone());
} else if (needs_init) {
// Constant initializations always assign to the declared constant which
// is always at the function scope level. This is only relevant for
@@ -2106,9 +2076,10 @@ Block* Parser::ParseVariableDeclarations(
ASSERT(proxy->var() != NULL);
ASSERT(value != NULL);
Assignment* assignment =
- factory()->NewAssignment(init_op, proxy, value, position);
- block->AddStatement(factory()->NewExpressionStatement(assignment),
- zone());
+ factory()->NewAssignment(init_op, proxy, value, pos);
+ block->AddStatement(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
value = NULL;
}
@@ -2122,9 +2093,10 @@ Block* Parser::ParseVariableDeclarations(
VariableProxy* proxy =
initialization_scope->NewUnresolved(factory(), name, interface);
Assignment* assignment =
- factory()->NewAssignment(init_op, proxy, value, position);
- block->AddStatement(factory()->NewExpressionStatement(assignment),
- zone());
+ factory()->NewAssignment(init_op, proxy, value, pos);
+ block->AddStatement(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
}
if (fni_ != NULL) fni_->Leave();
@@ -2156,6 +2128,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// ExpressionStatement | LabelledStatement ::
// Expression ';'
// Identifier ':' Statement
+ int pos = peek_position();
bool starts_with_idenfifier = peek_any_identifier();
Expression* expr = ParseExpression(true, CHECK_OK);
if (peek() == Token::COLON && starts_with_idenfifier && expr != NULL &&
@@ -2215,7 +2188,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
scanner().literal_contains_escapes()) {
ExpectSemicolon(CHECK_OK);
}
- return factory()->NewExpressionStatement(expr);
+ return factory()->NewExpressionStatement(expr, pos);
}
@@ -2223,6 +2196,7 @@ IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
// IfStatement ::
// 'if' '(' Expression ')' Statement ('else' Statement)?
+ int pos = peek_position();
Expect(Token::IF, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
Expression* condition = ParseExpression(true, CHECK_OK);
@@ -2233,9 +2207,10 @@ IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
Next();
else_statement = ParseStatement(labels, CHECK_OK);
} else {
- else_statement = factory()->NewEmptyStatement();
+ else_statement = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
}
- return factory()->NewIfStatement(condition, then_statement, else_statement);
+ return factory()->NewIfStatement(
+ condition, then_statement, else_statement, pos);
}
@@ -2243,6 +2218,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
// ContinueStatement ::
// 'continue' Identifier? ';'
+ int pos = peek_position();
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
@@ -2265,7 +2241,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
return NULL;
}
ExpectSemicolon(CHECK_OK);
- return factory()->NewContinueStatement(target);
+ return factory()->NewContinueStatement(target, pos);
}
@@ -2273,6 +2249,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
// BreakStatement ::
// 'break' Identifier? ';'
+ int pos = peek_position();
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
@@ -2284,7 +2261,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
// empty statements, e.g. 'l1: l2: l3: break l2;'
if (!label.is_null() && ContainsLabel(labels, label)) {
ExpectSemicolon(CHECK_OK);
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(pos);
}
BreakableStatement* target = NULL;
target = LookupBreakTarget(label, CHECK_OK);
@@ -2301,7 +2278,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
return NULL;
}
ExpectSemicolon(CHECK_OK);
- return factory()->NewBreakStatement(target);
+ return factory()->NewBreakStatement(target, pos);
}
@@ -2309,10 +2286,11 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// ReturnStatement ::
// 'return' Expression? ';'
- // Consume the return token. It is necessary to do the before
+ // Consume the return token. It is necessary to do that before
// reporting any errors on it, because of the way errors are
// reported (underlining).
Expect(Token::RETURN, CHECK_OK);
+ int pos = position();
Token::Value tok = peek();
Statement* result;
@@ -2321,7 +2299,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
- return_value = GetLiteralUndefined();
+ return_value = GetLiteralUndefined(position());
} else {
return_value = ParseExpression(true, CHECK_OK);
}
@@ -2330,10 +2308,10 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Expression* generator = factory()->NewVariableProxy(
current_function_state_->generator_object_variable());
Expression* yield = factory()->NewYield(
- generator, return_value, Yield::FINAL, RelocInfo::kNoPosition);
- result = factory()->NewExpressionStatement(yield);
+ generator, return_value, Yield::FINAL, pos);
+ result = factory()->NewExpressionStatement(yield, pos);
} else {
- result = factory()->NewReturnStatement(return_value);
+ result = factory()->NewReturnStatement(return_value, pos);
}
// An ECMAScript program is considered syntactically incorrect if it
@@ -2347,7 +2325,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Handle<String> message = isolate()->factory()->illegal_return_string();
Expression* throw_error =
NewThrowSyntaxError(message, Handle<Object>::null());
- return factory()->NewExpressionStatement(throw_error);
+ return factory()->NewExpressionStatement(throw_error, pos);
}
return result;
}
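
Inside a generator, 'return e' is desugared into a final yield on the hidden generator object, and both the Yield and the wrapping ExpressionStatement now carry the position of the 'return' keyword instead of kNoPosition. A sketch of that construction with toy AST classes (the Yield::FINAL kind is real; everything else here is schematic):

    #include <cassert>

    struct Expression { virtual ~Expression() {} };
    struct VariableProxy : Expression {};

    struct Yield : Expression {
      enum Kind { SUSPEND, DELEGATING, FINAL };
      Yield(Expression* generator, Expression* value, Kind kind, int pos)
          : generator_(generator), value_(value), kind_(kind), pos_(pos) {}
      Expression* generator_;
      Expression* value_;
      Kind kind_;
      int pos_;
    };

    struct ExpressionStatement {
      ExpressionStatement(Expression* expr, int pos)
          : expr_(expr), pos_(pos) {}
      Expression* expr_;
      int pos_;
    };

    // 'return e;' in a generator becomes a FINAL yield of e on the
    // hidden generator object, positioned at the 'return' keyword.
    static ExpressionStatement* DesugarGeneratorReturn(
        Expression* generator, Expression* value, int return_pos) {
      Yield* y = new Yield(generator, value, Yield::FINAL, return_pos);
      return new ExpressionStatement(y, return_pos);
    }

    int main() {
      VariableProxy gen, val;
      ExpressionStatement* s = DesugarGeneratorReturn(&gen, &val, 10);
      assert(static_cast<Yield*>(s->expr_)->kind_ == Yield::FINAL);
      delete s->expr_;
      delete s;
      return 0;
    }
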
@@ -2358,6 +2336,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
+ int pos = position();
if (!top_scope_->is_classic_mode()) {
ReportMessage("strict_mode_with", Vector<const char*>::empty());
@@ -2377,7 +2356,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
stmt = ParseStatement(labels, CHECK_OK);
with_scope->set_end_position(scanner().location().end_pos);
}
- return factory()->NewWithStatement(with_scope, expr, stmt);
+ return factory()->NewWithStatement(with_scope, expr, stmt, pos);
}
@@ -2401,7 +2380,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
*default_seen_ptr = true;
}
Expect(Token::COLON, CHECK_OK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
ZoneList<Statement*>* statements =
new(zone()) ZoneList<Statement*>(5, zone());
while (peek() != Token::CASE &&
@@ -2411,7 +2390,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
statements->Add(stat, zone());
}
- return new(zone()) CaseClause(isolate(), label, statements, pos);
+ return factory()->NewCaseClause(label, statements, pos);
}
@@ -2420,7 +2399,8 @@ SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels,
// SwitchStatement ::
// 'switch' '(' Expression ')' '{' CaseClause* '}'
- SwitchStatement* statement = factory()->NewSwitchStatement(labels);
+ SwitchStatement* statement =
+ factory()->NewSwitchStatement(labels, peek_position());
Target target(&this->target_stack_, statement);
Expect(Token::SWITCH, CHECK_OK);
@@ -2447,7 +2427,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
// 'throw' Expression ';'
Expect(Token::THROW, CHECK_OK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
if (scanner().HasAnyLineTerminatorBeforeNext()) {
ReportMessage("newline_after_throw", Vector<const char*>::empty());
*ok = false;
@@ -2456,7 +2436,8 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expression* exception = ParseExpression(true, CHECK_OK);
ExpectSemicolon(CHECK_OK);
- return factory()->NewExpressionStatement(factory()->NewThrow(exception, pos));
+ return factory()->NewExpressionStatement(
+ factory()->NewThrow(exception, pos), pos);
}
@@ -2473,6 +2454,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
// 'finally' Block
Expect(Token::TRY, CHECK_OK);
+ int pos = position();
TargetCollector try_collector(zone());
Block* try_block;
@@ -2544,9 +2526,10 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
ASSERT(catch_scope != NULL && catch_variable != NULL);
int index = current_function_state_->NextHandlerIndex();
TryCatchStatement* statement = factory()->NewTryCatchStatement(
- index, try_block, catch_scope, catch_variable, catch_block);
+ index, try_block, catch_scope, catch_variable, catch_block,
+ RelocInfo::kNoPosition);
statement->set_escaping_targets(try_collector.targets());
- try_block = factory()->NewBlock(NULL, 1, false);
+ try_block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
try_block->AddStatement(statement, zone());
catch_block = NULL; // Clear to indicate it's been handled.
}
@@ -2557,11 +2540,12 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
ASSERT(catch_scope != NULL && catch_variable != NULL);
int index = current_function_state_->NextHandlerIndex();
result = factory()->NewTryCatchStatement(
- index, try_block, catch_scope, catch_variable, catch_block);
+ index, try_block, catch_scope, catch_variable, catch_block, pos);
} else {
ASSERT(finally_block != NULL);
int index = current_function_state_->NextHandlerIndex();
- result = factory()->NewTryFinallyStatement(index, try_block, finally_block);
+ result = factory()->NewTryFinallyStatement(
+ index, try_block, finally_block, pos);
// Combine the jump targets of the try block and the possible catch block.
try_collector.targets()->AddAll(*catch_collector.targets(), zone());
}
@@ -2576,7 +2560,8 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
- DoWhileStatement* loop = factory()->NewDoWhileStatement(labels);
+ DoWhileStatement* loop =
+ factory()->NewDoWhileStatement(labels, peek_position());
Target target(&this->target_stack_, loop);
Expect(Token::DO, CHECK_OK);
@@ -2584,11 +2569,6 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- if (loop != NULL) {
- int position = scanner().location().beg_pos;
- loop->set_condition_position(position);
- }
-
Expression* cond = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
@@ -2607,7 +2587,7 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
- WhileStatement* loop = factory()->NewWhileStatement(labels);
+ WhileStatement* loop = factory()->NewWhileStatement(labels, peek_position());
Target target(&this->target_stack_, loop);
Expect(Token::WHILE, CHECK_OK);
@@ -2666,8 +2646,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
// var result = iterator.next();
{
Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
- Expression* next_literal =
- factory()->NewLiteral(heap_factory->next_string());
+ Expression* next_literal = factory()->NewLiteral(
+ heap_factory->next_string(), RelocInfo::kNoPosition);
Expression* next_property = factory()->NewProperty(
iterator_proxy, next_literal, RelocInfo::kNoPosition);
ZoneList<Expression*>* next_arguments =
@@ -2681,8 +2661,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
// result.done
{
- Expression* done_literal =
- factory()->NewLiteral(heap_factory->done_string());
+ Expression* done_literal = factory()->NewLiteral(
+ heap_factory->done_string(), RelocInfo::kNoPosition);
Expression* result_proxy = factory()->NewVariableProxy(result);
result_done = factory()->NewProperty(
result_proxy, done_literal, RelocInfo::kNoPosition);
@@ -2690,8 +2670,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
// each = result.value
{
- Expression* value_literal =
- factory()->NewLiteral(heap_factory->value_string());
+ Expression* value_literal = factory()->NewLiteral(
+ heap_factory->value_string(), RelocInfo::kNoPosition);
Expression* result_proxy = factory()->NewVariableProxy(result);
Expression* result_value = factory()->NewProperty(
result_proxy, value_literal, RelocInfo::kNoPosition);
@@ -2711,6 +2691,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
+ int pos = peek_position();
Statement* init = NULL;
// Create an in-between scope for let-bound iteration variables.
@@ -2735,7 +2716,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
if (!name.is_null() && CheckInOrOf(accept_OF, &mode)) {
Interface* interface =
is_const ? Interface::NewConst() : Interface::NewValue();
- ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
+ ForEachStatement* loop =
+ factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
Expression* enumerable = ParseExpression(true, CHECK_OK);
@@ -2745,7 +2727,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
top_scope_->NewUnresolved(factory(), name, interface);
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, each, enumerable, body);
- Block* result = factory()->NewBlock(NULL, 2, false);
+ Block* result =
+ factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
top_scope_ = saved_scope;
@@ -2789,7 +2772,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Handle<String> tempname = heap_factory->InternalizeString(tempstr);
Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
- ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
+ ForEachStatement* loop =
+ factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
// The expression does not see the loop variable.
@@ -2801,11 +2785,12 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
VariableProxy* each =
top_scope_->NewUnresolved(factory(), name, Interface::NewValue());
Statement* body = ParseStatement(NULL, CHECK_OK);
- Block* body_block = factory()->NewBlock(NULL, 3, false);
+ Block* body_block =
+ factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
Assignment* assignment = factory()->NewAssignment(
Token::ASSIGN, each, temp_proxy, RelocInfo::kNoPosition);
- Statement* assignment_statement =
- factory()->NewExpressionStatement(assignment);
+ Statement* assignment_statement = factory()->NewExpressionStatement(
+ assignment, RelocInfo::kNoPosition);
body_block->AddStatement(variable_statement, zone());
body_block->AddStatement(assignment_statement, zone());
body_block->AddStatement(body, zone());
@@ -2835,7 +2820,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
isolate()->factory()->invalid_lhs_in_for_in_string();
expression = NewThrowReferenceError(message);
}
- ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
+ ForEachStatement* loop =
+ factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
Expression* enumerable = ParseExpression(true, CHECK_OK);
@@ -2851,13 +2837,14 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
return loop;
} else {
- init = factory()->NewExpressionStatement(expression);
+ init = factory()->NewExpressionStatement(
+ expression, RelocInfo::kNoPosition);
}
}
}
// Standard 'for' loop
- ForStatement* loop = factory()->NewForStatement(labels);
+ ForStatement* loop = factory()->NewForStatement(labels, pos);
Target target(&this->target_stack_, loop);
// Parsed initializer at this point.
@@ -2872,7 +2859,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* next = NULL;
if (peek() != Token::RPAREN) {
Expression* exp = ParseExpression(true, CHECK_OK);
- next = factory()->NewExpressionStatement(exp);
+ next = factory()->NewExpressionStatement(exp, RelocInfo::kNoPosition);
}
Expect(Token::RPAREN, CHECK_OK);
@@ -2892,7 +2879,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// for (; c; n) b
// }
ASSERT(init != NULL);
- Block* result = factory()->NewBlock(NULL, 2, false);
+ Block* result = factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
result->AddStatement(init, zone());
result->AddStatement(loop, zone());
result->set_scope(for_scope);
@@ -2914,10 +2901,9 @@ Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
while (peek() == Token::COMMA) {
Expect(Token::COMMA, CHECK_OK);
- int position = scanner().location().beg_pos;
+ int pos = position();
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result =
- factory()->NewBinaryOperation(Token::COMMA, result, right, position);
+ result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
}
return result;
}
@@ -2961,7 +2947,7 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
MarkAsLValue(expression);
Token::Value op = Next(); // Get assignment operator.
- int pos = scanner().location().beg_pos;
+ int pos = position();
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
// TODO(1231235): We try to estimate the set of properties set by
@@ -3005,15 +2991,14 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
Expression* Parser::ParseYieldExpression(bool* ok) {
// YieldExpression ::
// 'yield' '*'? AssignmentExpression
- int position = scanner().peek_location().beg_pos;
+ int pos = peek_position();
Expect(Token::YIELD, CHECK_OK);
Yield::Kind kind =
Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
Expression* generator_object = factory()->NewVariableProxy(
current_function_state_->generator_object_variable());
Expression* expression = ParseAssignmentExpression(false, CHECK_OK);
- Yield* yield =
- factory()->NewYield(generator_object, expression, kind, position);
+ Yield* yield = factory()->NewYield(generator_object, expression, kind, pos);
if (kind == Yield::DELEGATING) {
yield->set_index(current_function_state_->NextHandlerIndex());
}
@@ -3027,6 +3012,7 @@ Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
// LogicalOrExpression
// LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
+ int pos = peek_position();
// We start using the binary expression parser for prec >= 4 only!
Expression* expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
if (peek() != Token::CONDITIONAL) return expression;
@@ -3034,17 +3020,14 @@ Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
- int left_position = scanner().peek_location().beg_pos;
Expression* left = ParseAssignmentExpression(true, CHECK_OK);
Expect(Token::COLON, CHECK_OK);
- int right_position = scanner().peek_location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- return factory()->NewConditional(
- expression, left, right, left_position, right_position);
+ return factory()->NewConditional(expression, left, right, pos);
}
-static int Precedence(Token::Value tok, bool accept_IN) {
+int ParserBase::Precedence(Token::Value tok, bool accept_IN) {
if (tok == Token::IN && !accept_IN)
return 0; // 0 precedence will terminate binary expression parsing
@@ -3060,7 +3043,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
Token::Value op = Next();
- int position = scanner().location().beg_pos;
+ int pos = position();
Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
// Compute some expressions involving only number literals.
@@ -3071,47 +3054,47 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
switch (op) {
case Token::ADD:
- x = factory()->NewNumberLiteral(x_val + y_val);
+ x = factory()->NewNumberLiteral(x_val + y_val, pos);
continue;
case Token::SUB:
- x = factory()->NewNumberLiteral(x_val - y_val);
+ x = factory()->NewNumberLiteral(x_val - y_val, pos);
continue;
case Token::MUL:
- x = factory()->NewNumberLiteral(x_val * y_val);
+ x = factory()->NewNumberLiteral(x_val * y_val, pos);
continue;
case Token::DIV:
- x = factory()->NewNumberLiteral(x_val / y_val);
+ x = factory()->NewNumberLiteral(x_val / y_val, pos);
continue;
case Token::BIT_OR: {
int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::BIT_AND: {
int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::BIT_XOR: {
int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::SHL: {
int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::SHR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
uint32_t value = DoubleToUint32(x_val) >> shift;
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::SAR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
default:
@@ -3130,15 +3113,15 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
default: break;
}
- x = factory()->NewCompareOperation(cmp, x, y, position);
+ x = factory()->NewCompareOperation(cmp, x, y, pos);
if (cmp != op) {
// The comparison was negated - add a NOT.
- x = factory()->NewUnaryOperation(Token::NOT, x, position);
+ x = factory()->NewUnaryOperation(Token::NOT, x, pos);
}
} else {
// We have a "normal" binary operation.
- x = factory()->NewBinaryOperation(op, x, y, position);
+ x = factory()->NewBinaryOperation(op, x, y, pos);
}
}
}
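
The folding above evaluates binary operators whose operands are both number literals and replaces them with a single NumberLiteral at the operator's position; the bitwise and shift cases first coerce to int32/uint32 and mask the shift count with 0x1f, per ECMAScript. A standalone sketch of three of those folds (the Double* helpers below are crude stand-ins that only handle small in-range values):

    #include <cassert>
    #include <cstdint>

    static int32_t DoubleToInt32(double v) {
      return static_cast<int32_t>(v);
    }
    static uint32_t DoubleToUint32(double v) {
      return static_cast<uint32_t>(static_cast<int64_t>(v));
    }

    static double FoldAdd(double x, double y) { return x + y; }

    static double FoldShl(double x, double y) {
      // ECMAScript '<<': ToInt32(x) << (ToInt32(y) & 0x1f).
      return DoubleToInt32(x) << (DoubleToInt32(y) & 0x1f);
    }

    static double FoldShr(double x, double y) {
      // ECMAScript '>>>': ToUint32(x) >> (ToInt32(y) & 0x1f).
      uint32_t shift = DoubleToInt32(y) & 0x1f;
      return DoubleToUint32(x) >> shift;
    }

    int main() {
      assert(FoldAdd(2, 3) == 5);   // '2 + 3' parses as NumberLiteral(5)
      assert(FoldShl(1, 33) == 2);  // shift count masked: 33 & 0x1f == 1
      assert(FoldShr(-1, 0) == 4294967295.0);  // '>>>' is unsigned
      return 0;
    }
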
@@ -3162,7 +3145,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
Token::Value op = peek();
if (Token::IsUnaryOp(op)) {
op = Next();
- int position = scanner().location().beg_pos;
+ int pos = position();
Expression* expression = ParseUnaryExpression(CHECK_OK);
if (expression != NULL && (expression->AsLiteral() != NULL)) {
@@ -3170,9 +3153,8 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
if (op == Token::NOT) {
// Convert the literal to a boolean condition and negate it.
bool condition = literal->BooleanValue();
- Handle<Object> result(isolate()->heap()->ToBoolean(!condition),
- isolate());
- return factory()->NewLiteral(result);
+ Handle<Object> result = isolate()->factory()->ToBoolean(!condition);
+ return factory()->NewLiteral(result, pos);
} else if (literal->IsNumber()) {
// Compute some expressions involving only number literals.
double value = literal->Number();
@@ -3180,9 +3162,9 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
case Token::ADD:
return expression;
case Token::SUB:
- return factory()->NewNumberLiteral(-value);
+ return factory()->NewNumberLiteral(-value, pos);
case Token::BIT_NOT:
- return factory()->NewNumberLiteral(~DoubleToInt32(value));
+ return factory()->NewNumberLiteral(~DoubleToInt32(value), pos);
default:
break;
}
@@ -3205,25 +3187,25 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
if (op == Token::ADD) {
return factory()->NewBinaryOperation(Token::MUL,
expression,
- factory()->NewNumberLiteral(1),
- position);
+ factory()->NewNumberLiteral(1, pos),
+ pos);
}
// The same idea for '-foo' => 'foo*(-1)'.
if (op == Token::SUB) {
return factory()->NewBinaryOperation(Token::MUL,
expression,
- factory()->NewNumberLiteral(-1),
- position);
+ factory()->NewNumberLiteral(-1, pos),
+ pos);
}
// ...and one more time for '~foo' => 'foo^(~0)'.
if (op == Token::BIT_NOT) {
return factory()->NewBinaryOperation(Token::BIT_XOR,
expression,
- factory()->NewNumberLiteral(~0),
- position);
+ factory()->NewNumberLiteral(~0, pos),
+ pos);
}
- return factory()->NewUnaryOperation(op, expression, position);
+ return factory()->NewUnaryOperation(op, expression, pos);
} else if (Token::IsCountOp(op)) {
op = Next();
@@ -3244,11 +3226,10 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
}
MarkAsLValue(expression);
- int position = scanner().location().beg_pos;
return factory()->NewCountOperation(op,
true /* prefix */,
expression,
- position);
+ position());
} else {
return ParsePostfixExpression(ok);
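
Unary operators get the same treatment: '!literal' folds to a boolean literal, '-n' and '~n' fold numerically, and for non-literal operands '+foo', '-foo' and '~foo' are strength-reduced to 'foo*1', 'foo*(-1)' and 'foo^(~0)' so later phases only ever see binary operations. A sketch of the literal folds:

    #include <cassert>
    #include <cstdint>

    static int32_t DoubleToInt32(double v) {
      return static_cast<int32_t>(v);
    }

    // '!lit': negate the literal's boolean value at parse time.
    static bool FoldNot(bool condition) { return !condition; }

    // '-lit' and '~lit' on number literals fold to new number literals.
    static double FoldNegate(double value) { return -value; }
    static double FoldBitNot(double value) { return ~DoubleToInt32(value); }

    int main() {
      assert(FoldNot(true) == false);  // '!true' -> false literal
      assert(FoldNegate(5) == -5);     // '-5'    -> NumberLiteral(-5)
      assert(FoldBitNot(0) == -1);     // '~0'    -> NumberLiteral(-1)
      // Non-literals are rewritten instead: '+foo' -> 'foo * 1',
      // '-foo' -> 'foo * (-1)', '~foo' -> 'foo ^ ~0'.
      return 0;
    }
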
@@ -3280,12 +3261,11 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
MarkAsLValue(expression);
Token::Value next = Next();
- int position = scanner().location().beg_pos;
expression =
factory()->NewCountOperation(next,
false /* postfix */,
expression,
- position);
+ position());
}
return expression;
}
@@ -3306,7 +3286,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
switch (peek()) {
case Token::LBRACK: {
Consume(Token::LBRACK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Expression* index = ParseExpression(true, CHECK_OK);
result = factory()->NewProperty(result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
@@ -3318,14 +3298,14 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
if (scanner().current_token() == Token::IDENTIFIER) {
// For call of an identifier we want to report position of
// the identifier as position of the call in the stack trace.
- pos = scanner().location().beg_pos;
+ pos = position();
} else {
// For other kinds of calls we record position of the parenthesis as
// position of the call. Note that this is extremely important for
// expressions of the form function(){...}() for which call position
// should not point to the closing brace otherwise it will intersect
// with positions recorded for function literal and confuse debugger.
- pos = scanner().peek_location().beg_pos;
+ pos = peek_position();
// Also the trailing parenthesis are a hint that the function will
// be called immediately. If we happen to have parsed a preceding
// function literal eagerly, we can also compile it eagerly.
@@ -3354,10 +3334,10 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
case Token::PERIOD: {
Consume(Token::PERIOD);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Handle<String> name = ParseIdentifierName(CHECK_OK);
- result =
- factory()->NewProperty(result, factory()->NewLiteral(name), pos);
+ result = factory()->NewProperty(
+ result, factory()->NewLiteral(name, pos), pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -3382,7 +3362,7 @@ Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
// member expression parser, which is only allowed to match argument
// lists as long as it has 'new' prefixes left
Expect(Token::NEW, CHECK_OK);
- PositionStack::Element pos(stack, scanner().location().beg_pos);
+ PositionStack::Element pos(stack, position());
Expression* result;
if (peek() == Token::NEW) {
@@ -3421,7 +3401,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
Expression* result = NULL;
if (peek() == Token::FUNCTION) {
Expect(Token::FUNCTION, CHECK_OK);
- int function_token_position = scanner().location().beg_pos;
+ int function_token_position = position();
bool is_generator = allow_generators() && Check(Token::MUL);
Handle<String> name;
bool is_strict_reserved_name = false;
@@ -3446,7 +3426,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
switch (peek()) {
case Token::LBRACK: {
Consume(Token::LBRACK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Expression* index = ParseExpression(true, CHECK_OK);
result = factory()->NewProperty(result, index, pos);
if (fni_ != NULL) {
@@ -3462,10 +3442,10 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
}
case Token::PERIOD: {
Consume(Token::PERIOD);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Handle<String> name = ParseIdentifierName(CHECK_OK);
- result =
- factory()->NewProperty(result, factory()->NewLiteral(name), pos);
+ result = factory()->NewProperty(
+ result, factory()->NewLiteral(name, pos), pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -3473,8 +3453,8 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
if ((stack == NULL) || stack->is_empty()) return result;
// Consume one of the new prefixes (already parsed).
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- int last = stack->pop();
- result = factory()->NewCallNew(result, args, last);
+ int pos = stack->pop();
+ result = factory()->NewCallNew(result, args, pos);
break;
}
default:
@@ -3491,9 +3471,10 @@ DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
// DebuggerStatement ::
// 'debugger' ';'
+ int pos = peek_position();
Expect(Token::DEBUGGER, CHECK_OK);
ExpectSemicolon(CHECK_OK);
- return factory()->NewDebuggerStatement();
+ return factory()->NewDebuggerStatement(pos);
}
@@ -3501,7 +3482,7 @@ void Parser::ReportUnexpectedToken(Token::Value token) {
// We don't report stack overflows here, to avoid increasing the
// stack depth even further. Instead we report it after parsing is
// over, in ParseProgram/ParseJson.
- if (token == Token::ILLEGAL && stack_overflow_) return;
+ if (token == Token::ILLEGAL && stack_overflow()) return;
// Four of the tokens are treated specially
switch (token) {
case Token::EOS:
@@ -3555,6 +3536,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
// RegExpLiteral
// '(' Expression ')'
+ int pos = peek_position();
Expression* result = NULL;
switch (peek()) {
case Token::THIS: {
@@ -3565,17 +3547,17 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::NULL_LITERAL:
Consume(Token::NULL_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->null_value());
+ result = factory()->NewLiteral(isolate()->factory()->null_value(), pos);
break;
case Token::TRUE_LITERAL:
Consume(Token::TRUE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->true_value());
+ result = factory()->NewLiteral(isolate()->factory()->true_value(), pos);
break;
case Token::FALSE_LITERAL:
Consume(Token::FALSE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->false_value());
+ result = factory()->NewLiteral(isolate()->factory()->false_value(), pos);
break;
case Token::IDENTIFIER:
@@ -3589,8 +3571,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
PrintF("# Variable %s ", name->ToAsciiArray());
#endif
Interface* interface = Interface::NewUnknown(zone());
- result = top_scope_->NewUnresolved(
- factory(), name, interface, scanner().location().beg_pos);
+ result = top_scope_->NewUnresolved(factory(), name, interface, pos);
break;
}
@@ -3601,14 +3582,14 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
scanner().literal_ascii_string(),
ALLOW_HEX | ALLOW_OCTAL |
ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- result = factory()->NewNumberLiteral(value);
+ result = factory()->NewNumberLiteral(value, pos);
break;
}
case Token::STRING: {
Consume(Token::STRING);
Handle<String> symbol = GetSymbol();
- result = factory()->NewLiteral(symbol);
+ result = factory()->NewLiteral(symbol, pos);
if (fni_ != NULL) fni_->PushLiteralName(symbol);
break;
}
@@ -3662,12 +3643,13 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
+ int pos = peek_position();
ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4, zone());
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
Expression* elem;
if (peek() == Token::COMMA) {
- elem = GetLiteralTheHole();
+ elem = GetLiteralTheHole(peek_position());
} else {
elem = ParseAssignmentExpression(true, CHECK_OK);
}
@@ -3729,7 +3711,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
literals->set(1, *element_values);
return factory()->NewArrayLiteral(
- literals, values, literal_index, is_simple, depth);
+ literals, values, literal_index, is_simple, depth, pos);
}
@@ -3793,84 +3775,6 @@ Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
}
-// Validation per 11.1.5 Object Initialiser
-class ObjectLiteralPropertyChecker {
- public:
- ObjectLiteralPropertyChecker(Parser* parser, LanguageMode language_mode) :
- props_(Literal::Match),
- parser_(parser),
- language_mode_(language_mode) {
- }
-
- void CheckProperty(
- ObjectLiteral::Property* property,
- Scanner::Location loc,
- bool* ok);
-
- private:
- enum PropertyKind {
- kGetAccessor = 0x01,
- kSetAccessor = 0x02,
- kAccessor = kGetAccessor | kSetAccessor,
- kData = 0x04
- };
-
- static intptr_t GetPropertyKind(ObjectLiteral::Property* property) {
- switch (property->kind()) {
- case ObjectLiteral::Property::GETTER:
- return kGetAccessor;
- case ObjectLiteral::Property::SETTER:
- return kSetAccessor;
- default:
- return kData;
- }
- }
-
- HashMap props_;
- Parser* parser_;
- LanguageMode language_mode_;
-};
-
-
-void ObjectLiteralPropertyChecker::CheckProperty(
- ObjectLiteral::Property* property,
- Scanner::Location loc,
- bool* ok) {
- ASSERT(property != NULL);
- Literal* literal = property->key();
- HashMap::Entry* entry = props_.Lookup(literal, literal->Hash(), true);
- intptr_t prev = reinterpret_cast<intptr_t> (entry->value);
- intptr_t curr = GetPropertyKind(property);
-
- // Duplicate data properties are illegal in strict or extended mode.
- if (language_mode_ != CLASSIC_MODE && (curr & prev & kData) != 0) {
- parser_->ReportMessageAt(loc, "strict_duplicate_property",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
- // Data property conflicting with an accessor.
- if (((curr & kData) && (prev & kAccessor)) ||
- ((prev & kData) && (curr & kAccessor))) {
- parser_->ReportMessageAt(loc, "accessor_data_property",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
- // Two accessors of the same type conflicting
- if ((curr & prev & kAccessor) != 0) {
- parser_->ReportMessageAt(loc, "accessor_get_set",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
-
- // Update map
- entry->value = reinterpret_cast<void*> (prev | curr);
- *ok = true;
-}
-
-
void Parser::BuildObjectLiteralConstantProperties(
ZoneList<ObjectLiteral::Property*>* properties,
Handle<FixedArray> constant_properties,
@@ -3943,41 +3847,6 @@ void Parser::BuildObjectLiteralConstantProperties(
}
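
The deleted ObjectLiteralPropertyChecker enforced the 11.1.5 rules: duplicate data properties are illegal outside classic mode, a data property may not clash with an accessor, and a getter or setter may not repeat for the same key. Its replacement, ObjectLiteralChecker, lives with the shared parser code so the preparser applies identical checks. A condensed standalone version of the bitmask scheme the deleted code used:

    #include <cassert>
    #include <string>
    #include <unordered_map>

    enum PropertyKind {
      kGetAccessor = 0x01,
      kSetAccessor = 0x02,
      kAccessor    = kGetAccessor | kSetAccessor,
      kData        = 0x04
    };

    // One bitmask per key records which flavors of the property have
    // been seen so far, exactly as in the deleted checker.
    class PropertyChecker {
     public:
      explicit PropertyChecker(bool strict) : strict_(strict) {}

      // Returns NULL if |kind| is legal for |key|, else an error id.
      const char* Check(const std::string& key, PropertyKind kind) {
        int& prev = seen_[key];
        if (strict_ && (prev & kind & kData))
          return "strict_duplicate_property";
        if (((kind & kData) && (prev & kAccessor)) ||
            ((prev & kData) && (kind & kAccessor)))
          return "accessor_data_property";
        if (prev & kind & kAccessor) return "accessor_get_set";
        prev |= kind;
        return NULL;
      }

     private:
      bool strict_;
      std::unordered_map<std::string, int> seen_;
    };

    int main() {
      PropertyChecker strict(true);
      assert(strict.Check("x", kData) == NULL);
      assert(strict.Check("x", kData) != NULL);         // duplicate data
      PropertyChecker sloppy(false);
      assert(sloppy.Check("y", kGetAccessor) == NULL);
      assert(sloppy.Check("y", kSetAccessor) == NULL);  // get+set is fine
      assert(sloppy.Check("y", kGetAccessor) != NULL);  // second getter
      return 0;
    }
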
-ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
- bool* ok) {
- // Special handling of getter and setter syntax:
- // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
- // We have already read the "get" or "set" keyword.
- Token::Value next = Next();
- bool is_keyword = Token::IsKeyword(next);
- if (next == Token::IDENTIFIER || next == Token::NUMBER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD ||
- next == Token::STRING || is_keyword) {
- Handle<String> name;
- if (is_keyword) {
- name = isolate_->factory()->InternalizeUtf8String(Token::String(next));
- } else {
- name = GetSymbol();
- }
- FunctionLiteral* value =
- ParseFunctionLiteral(name,
- false, // reserved words are allowed here
- false, // not a generator
- RelocInfo::kNoPosition,
- FunctionLiteral::ANONYMOUS_EXPRESSION,
- CHECK_OK);
- // Allow any number of parameters for compatibility with JSC.
- // Specification only allows zero parameters for get and one for set.
- return factory()->NewObjectLiteralProperty(is_getter, value);
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
-}
-
-
Expression* Parser::ParseObjectLiteral(bool* ok) {
// ObjectLiteral ::
// '{' (
@@ -3985,12 +3854,13 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
// )*[','] '}'
+ int pos = peek_position();
ZoneList<ObjectLiteral::Property*>* properties =
new(zone()) ZoneList<ObjectLiteral::Property*>(4, zone());
int number_of_boilerplate_properties = 0;
bool has_function = false;
- ObjectLiteralPropertyChecker checker(this, top_scope_->language_mode());
+ ObjectLiteralChecker checker(this, top_scope_->language_mode());
Expect(Token::LBRACE, CHECK_OK);
@@ -3999,9 +3869,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Literal* key = NULL;
Token::Value next = peek();
-
- // Location of the property name token
- Scanner::Location loc = scanner().peek_location();
+ int next_pos = peek_position();
switch (next) {
case Token::FUTURE_RESERVED_WORD:
@@ -4014,27 +3882,54 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
if (fni_ != NULL) fni_->PushLiteralName(id);
if ((is_getter || is_setter) && peek() != Token::COLON) {
- // Update loc to point to the identifier
- loc = scanner().peek_location();
- ObjectLiteral::Property* property =
- ParseObjectLiteralGetSet(is_getter, CHECK_OK);
- if (IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- // Validate the property.
- checker.CheckProperty(property, loc, CHECK_OK);
- properties->Add(property, zone());
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- continue; // restart the while
+ // Special handling of getter and setter syntax:
+ // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
+ // We have already read the "get" or "set" keyword.
+ Token::Value next = Next();
+ bool is_keyword = Token::IsKeyword(next);
+ if (next != i::Token::IDENTIFIER &&
+ next != i::Token::FUTURE_RESERVED_WORD &&
+ next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
+ next != i::Token::NUMBER &&
+ next != i::Token::STRING &&
+ !is_keyword) {
+ // Unexpected token.
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return NULL;
+ }
+ // Validate the property.
+ PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
+ checker.CheckProperty(next, type, CHECK_OK);
+ Handle<String> name = is_keyword
+ ? isolate_->factory()->InternalizeUtf8String(Token::String(next))
+ : GetSymbol();
+ FunctionLiteral* value =
+ ParseFunctionLiteral(name,
+ false, // reserved words are allowed here
+ false, // not a generator
+ RelocInfo::kNoPosition,
+ FunctionLiteral::ANONYMOUS_EXPRESSION,
+ CHECK_OK);
+ // Allow any number of parameters for compatibility with JSC.
+ // Specification only allows zero parameters for get and one for set.
+ ObjectLiteral::Property* property =
+ factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
+ if (IsBoilerplateProperty(property)) {
+ number_of_boilerplate_properties++;
+ }
+ properties->Add(property, zone());
+ if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
+ continue; // restart the while
}
// Failed to parse as get/set property, so it's just a property
// called "get" or "set".
- key = factory()->NewLiteral(id);
+ key = factory()->NewLiteral(id, next_pos);
break;
}
case Token::STRING: {
@@ -4043,10 +3938,10 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
if (fni_ != NULL) fni_->PushLiteralName(string);
uint32_t index;
if (!string.is_null() && string->AsArrayIndex(&index)) {
- key = factory()->NewNumberLiteral(index);
+ key = factory()->NewNumberLiteral(index, next_pos);
break;
}
- key = factory()->NewLiteral(string);
+ key = factory()->NewLiteral(string, next_pos);
break;
}
case Token::NUMBER: {
@@ -4056,14 +3951,14 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
scanner().literal_ascii_string(),
ALLOW_HEX | ALLOW_OCTAL |
ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- key = factory()->NewNumberLiteral(value);
+ key = factory()->NewNumberLiteral(value, next_pos);
break;
}
default:
if (Token::IsKeyword(next)) {
Consume(next);
Handle<String> string = GetSymbol();
- key = factory()->NewLiteral(string);
+ key = factory()->NewLiteral(string, next_pos);
} else {
// Unexpected token.
Token::Value next = Next();
@@ -4073,6 +3968,9 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
}
+ // Validate the property.
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
+
Expect(Token::COLON, CHECK_OK);
Expression* value = ParseAssignmentExpression(true, CHECK_OK);
@@ -4090,8 +3988,6 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
- // Validate the property
- checker.CheckProperty(property, loc, CHECK_OK);
properties->Add(property, zone());
// TODO(1240767): Consider allowing trailing comma.
@@ -4127,11 +4023,13 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
fast_elements,
depth,
may_store_doubles,
- has_function);
+ has_function,
+ pos);
}
Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
+ int pos = peek_position();
if (!scanner().ScanRegExpPattern(seen_equal)) {
Next();
ReportMessage("unterminated_regexp", Vector<const char*>::empty());
@@ -4146,7 +4044,7 @@ Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
Handle<String> js_flags = NextLiteralString(TENURED);
Next();
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index);
+ return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
}
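
Both literal productions above now capture a source position before consuming their first token and hand it to the AST factory. The helpers come from the new ParserBase; judging from their usage in this patch (their definitions are not shown here), they are roughly:

    // Sketch, assumed from usage: positions are character offsets taken
    // from the scanner's token locations.
    int ParserBase::position()      { return scanner()->location().beg_pos; }
    int ParserBase::peek_position() { return scanner()->peek_location().beg_pos; }

position() reports the start of the token just consumed and peek_position() the start of the token about to be consumed, which is why each production records pos up front.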
@@ -4271,12 +4169,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Handle<String> function_name,
bool name_is_strict_reserved,
bool is_generator,
- int function_token_position,
+ int function_token_pos,
FunctionLiteral::FunctionType function_type,
bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
+ int pos = function_token_pos == RelocInfo::kNoPosition
+ ? peek_position() : function_token_pos;
+
// Anonymous functions were passed either the empty symbol or a null
// handle as the function name. Remember if we were passed a non-empty
// handle to decide whether to invoke function name inference.
@@ -4414,8 +4315,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_name, fvar_mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
- VariableDeclaration* fvar_declaration =
- factory()->NewVariableDeclaration(proxy, fvar_mode, top_scope_);
+ VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
+ proxy, fvar_mode, top_scope_, RelocInfo::kNoPosition);
top_scope_->DeclareFunctionVar(fvar_declaration);
}
@@ -4436,7 +4337,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
parenthesized_function_ = false; // The bit was set for this function only.
if (is_lazily_compiled) {
- int function_block_pos = scanner().location().beg_pos;
+ int function_block_pos = position();
FunctionEntry entry;
if (pre_parse_data_ != NULL) {
// If we have pre_parse_data_, we use it to skip parsing the function
@@ -4466,11 +4367,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// building an AST. This gathers the data needed to build a lazy
// function.
SingletonLogger logger;
- preparser::PreParser::PreParseResult result =
- LazyParseFunctionLiteral(&logger);
- if (result == preparser::PreParser::kPreParseStackOverflow) {
+ PreParser::PreParseResult result = LazyParseFunctionLiteral(&logger);
+ if (result == PreParser::kPreParseStackOverflow) {
// Propagate stack overflow.
- stack_overflow_ = true;
+ set_stack_overflow();
*ok = false;
return NULL;
}
@@ -4505,9 +4405,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
body->Add(factory()->NewExpressionStatement(
factory()->NewAssignment(fvar_init_op,
fproxy,
- factory()->NewThisFunction(),
- RelocInfo::kNoPosition)),
- zone());
+ factory()->NewThisFunction(pos),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition), zone());
}
// For generators, allocate and yield an iterator on function entry.
@@ -4517,7 +4417,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
CallRuntime* allocation = factory()->NewCallRuntime(
isolate()->factory()->empty_string(),
Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject),
- arguments);
+ arguments, pos);
VariableProxy* init_proxy = factory()->NewVariableProxy(
current_function_state_->generator_object_variable());
Assignment* assignment = factory()->NewAssignment(
@@ -4526,7 +4426,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
current_function_state_->generator_object_variable());
Yield* yield = factory()->NewYield(
get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition);
- body->Add(factory()->NewExpressionStatement(yield), zone());
+ body->Add(factory()->NewExpressionStatement(
+ yield, RelocInfo::kNoPosition), zone());
}
ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK);
@@ -4535,10 +4436,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
VariableProxy* get_proxy = factory()->NewVariableProxy(
current_function_state_->generator_object_variable());
Expression *undefined = factory()->NewLiteral(
- isolate()->factory()->undefined_value());
+ isolate()->factory()->undefined_value(), RelocInfo::kNoPosition);
Yield* yield = factory()->NewYield(
get_proxy, undefined, Yield::FINAL, RelocInfo::kNoPosition);
- body->Add(factory()->NewExpressionStatement(yield), zone());
+ body->Add(factory()->NewExpressionStatement(
+ yield, RelocInfo::kNoPosition), zone());
}
materialized_literal_count = function_state.materialized_literal_count();
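
Taken together, the two generator hunks above splice an allocate-and-yield prologue and a final yield around every generator body. Conceptually (a sketch only; .generator_object is an internal temporary, not user syntax):

    // function* g() { BODY }   parses approximately as:
    //
    // function g() {
    //   .generator_object = %CreateJSGeneratorObject();
    //   yield .generator_object;   // Yield::INITIAL
    //   BODY
    //   yield undefined;           // Yield::FINAL
    // }

Most of the injected nodes correspond to no source text, which is why they are built with RelocInfo::kNoPosition while the surrounding user code now carries real positions.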
@@ -4553,9 +4455,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (!top_scope_->is_classic_mode()) {
if (IsEvalOrArguments(function_name)) {
int start_pos = scope->start_position();
- int position = function_token_position != RelocInfo::kNoPosition
- ? function_token_position
- : (start_pos > 0 ? start_pos - 1 : start_pos);
+ int position = function_token_pos != RelocInfo::kNoPosition
+ ? function_token_pos : (start_pos > 0 ? start_pos - 1 : start_pos);
Scanner::Location location = Scanner::Location(position, start_pos);
ReportMessageAt(location,
"strict_function_name", Vector<const char*>::empty());
@@ -4576,9 +4477,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
if (name_is_strict_reserved) {
int start_pos = scope->start_position();
- int position = function_token_position != RelocInfo::kNoPosition
- ? function_token_position
- : (start_pos > 0 ? start_pos - 1 : start_pos);
+ int position = function_token_pos != RelocInfo::kNoPosition
+ ? function_token_pos : (start_pos > 0 ? start_pos - 1 : start_pos);
Scanner::Location location = Scanner::Location(position, start_pos);
ReportMessageAt(location, "strict_reserved_word",
Vector<const char*>::empty());
@@ -4615,8 +4515,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_type,
FunctionLiteral::kIsFunction,
parenthesized,
- generator);
- function_literal->set_function_token_position(function_token_position);
+ generator,
+ pos);
+ function_literal->set_function_token_position(function_token_pos);
function_literal->set_ast_properties(&ast_properties);
function_literal->set_dont_optimize_reason(dont_optimize_reason);
@@ -4625,16 +4526,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
-preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
+PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
SingletonLogger* logger) {
HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
ASSERT_EQ(Token::LBRACE, scanner().current_token());
if (reusable_preparser_ == NULL) {
intptr_t stack_limit = isolate()->stack_guard()->real_climit();
- reusable_preparser_ = new preparser::PreParser(&scanner_,
- NULL,
- stack_limit);
+ reusable_preparser_ = new PreParser(&scanner_, NULL, stack_limit);
reusable_preparser_->set_allow_harmony_scoping(allow_harmony_scoping());
reusable_preparser_->set_allow_modules(allow_modules());
reusable_preparser_->set_allow_natives_syntax(allow_natives_syntax());
@@ -4644,7 +4543,7 @@ preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
reusable_preparser_->set_allow_harmony_numeric_literals(
allow_harmony_numeric_literals());
}
- preparser::PreParser::PreParseResult result =
+ PreParser::PreParseResult result =
reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
is_generator(),
logger);
@@ -4656,6 +4555,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
// CallRuntime ::
// '%' Identifier Arguments
+ int pos = peek_position();
Expect(Token::MOD, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
@@ -4701,11 +4601,11 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
}
// We have a valid intrinsics call or a call to a builtin.
- return factory()->NewCallRuntime(name, function, args);
+ return factory()->NewCallRuntime(name, function, args, pos);
}
-bool Parser::peek_any_identifier() {
+bool ParserBase::peek_any_identifier() {
Token::Value next = peek();
return next == Token::IDENTIFIER ||
next == Token::FUTURE_RESERVED_WORD ||
@@ -4714,35 +4614,9 @@ bool Parser::peek_any_identifier() {
}
-void Parser::Consume(Token::Value token) {
- Token::Value next = Next();
- USE(next);
- USE(token);
- ASSERT(next == token);
-}
-
-
-void Parser::Expect(Token::Value token, bool* ok) {
- Token::Value next = Next();
- if (next == token) return;
- ReportUnexpectedToken(next);
- *ok = false;
-}
-
-
-bool Parser::Check(Token::Value token) {
- Token::Value next = peek();
- if (next == token) {
- Consume(next);
- return true;
- }
- return false;
-}
-
-
-bool Parser::CheckContextualKeyword(Vector<const char> keyword) {
+bool ParserBase::CheckContextualKeyword(Vector<const char> keyword) {
if (peek() == Token::IDENTIFIER &&
- scanner().is_next_contextual_keyword(keyword)) {
+ scanner()->is_next_contextual_keyword(keyword)) {
Consume(Token::IDENTIFIER);
return true;
}
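
Consume, Expect, Check and the contextual-keyword helpers deleted or retargeted in these hunks (plus ExpectSemicolon and CheckOctalLiteral below) all move into the new ParserBase shared by Parser and PreParser -- hence the switch from the scanner_ member to the scanner() accessor, which returns a pointer in the base class. A minimal sketch of the split, listing only members this patch exercises (exact layout assumed):

    // Sketch: token-level state and helpers common to Parser and PreParser.
    class ParserBase {
     public:
      ParserBase(Scanner* scanner, uintptr_t stack_limit);
     protected:
      Scanner* scanner() const { return scanner_; }
      void set_stack_overflow() { stack_overflow_ = true; }
      Token::Value peek();
      Token::Value Next();
      void Consume(Token::Value token);
      void Expect(Token::Value token, bool* ok);
      bool Check(Token::Value token);
      void ExpectSemicolon(bool* ok);
      bool peek_any_identifier();
      bool CheckContextualKeyword(Vector<const char> keyword);
      void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
      void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
     private:
      Scanner* scanner_;
      bool stack_overflow_;
      uintptr_t stack_limit_;
    };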
@@ -4750,7 +4624,7 @@ bool Parser::CheckContextualKeyword(Vector<const char> keyword) {
}
-void Parser::ExpectSemicolon(bool* ok) {
+void ParserBase::ExpectSemicolon(bool* ok) {
// Check for automatic semicolon insertion according to
// the rules given in ECMA-262, section 7.9, page 21.
Token::Value tok = peek();
@@ -4758,7 +4632,7 @@ void Parser::ExpectSemicolon(bool* ok) {
Next();
return;
}
- if (scanner().HasAnyLineTerminatorBeforeNext() ||
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
tok == Token::RBRACE ||
tok == Token::EOS) {
return;
@@ -4767,23 +4641,25 @@ void Parser::ExpectSemicolon(bool* ok) {
}
-void Parser::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
+void ParserBase::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
Expect(Token::IDENTIFIER, ok);
if (!*ok) return;
- if (!scanner().is_literal_contextual_keyword(keyword)) {
+ if (!scanner()->is_literal_contextual_keyword(keyword)) {
+ ReportUnexpectedToken(scanner()->current_token());
*ok = false;
- ReportUnexpectedToken(scanner().current_token());
}
}
-Literal* Parser::GetLiteralUndefined() {
- return factory()->NewLiteral(isolate()->factory()->undefined_value());
+Literal* Parser::GetLiteralUndefined(int position) {
+ return factory()->NewLiteral(
+ isolate()->factory()->undefined_value(), position);
}
-Literal* Parser::GetLiteralTheHole() {
- return factory()->NewLiteral(isolate()->factory()->the_hole_value());
+Literal* Parser::GetLiteralTheHole(int position) {
+ return factory()->NewLiteral(
+ isolate()->factory()->the_hole_value(), RelocInfo::kNoPosition);
}
@@ -4865,14 +4741,11 @@ void Parser::CheckStrictModeLValue(Expression* expression,
// Checks whether an octal literal was last seen between beg_pos and end_pos.
// If so, reports an error. Only called for strict mode.
-void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- Scanner::Location octal = scanner().octal_position();
- if (octal.IsValid() &&
- beg_pos <= octal.beg_pos &&
- octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal",
- Vector<const char*>::empty());
- scanner().clear_octal_position();
+void ParserBase::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ Scanner::Location octal = scanner()->octal_position();
+ if (octal.IsValid() && beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
+ ReportMessageAt(octal, "strict_octal_literal");
+ scanner()->clear_octal_position();
*ok = false;
}
}
@@ -5012,12 +4885,13 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(
elements, FAST_ELEMENTS, TENURED);
+ int pos = position();
ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2, zone());
- args->Add(factory()->NewLiteral(message), zone());
- args->Add(factory()->NewLiteral(array), zone());
+ args->Add(factory()->NewLiteral(message, pos), zone());
+ args->Add(factory()->NewLiteral(array, pos), zone());
CallRuntime* call_constructor =
- factory()->NewCallRuntime(constructor, NULL, args);
- return factory()->NewThrow(call_constructor, scanner().location().beg_pos);
+ factory()->NewCallRuntime(constructor, NULL, args, pos);
+ return factory()->NewThrow(call_constructor, pos);
}
@@ -5907,15 +5781,15 @@ ScriptDataImpl* PreParserApi::PreParse(Isolate* isolate,
HistogramTimerScope timer(isolate->counters()->pre_parse());
Scanner scanner(isolate->unicode_cache());
intptr_t stack_limit = isolate->stack_guard()->real_climit();
- preparser::PreParser preparser(&scanner, &recorder, stack_limit);
+ PreParser preparser(&scanner, &recorder, stack_limit);
preparser.set_allow_lazy(true);
preparser.set_allow_generators(FLAG_harmony_generators);
preparser.set_allow_for_of(FLAG_harmony_iteration);
preparser.set_allow_harmony_scoping(FLAG_harmony_scoping);
preparser.set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals);
scanner.Initialize(source);
- preparser::PreParser::PreParseResult result = preparser.PreParseProgram();
- if (result == preparser::PreParser::kPreParseStackOverflow) {
+ PreParser::PreParseResult result = preparser.PreParseProgram();
+ if (result == PreParser::kPreParseStackOverflow) {
isolate->StackOverflow();
return NULL;
}
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 783626ad19..79ce68b615 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -425,7 +425,7 @@ class RegExpParser BASE_EMBEDDED {
// Forward declaration.
class SingletonLogger;
-class Parser BASE_EMBEDDED {
+class Parser : public ParserBase {
public:
explicit Parser(CompilationInfo* info);
~Parser() {
@@ -433,44 +433,12 @@ class Parser BASE_EMBEDDED {
reusable_preparser_ = NULL;
}
- bool allow_natives_syntax() const { return allow_natives_syntax_; }
- bool allow_lazy() const { return allow_lazy_; }
- bool allow_modules() { return scanner().HarmonyModules(); }
- bool allow_harmony_scoping() { return scanner().HarmonyScoping(); }
- bool allow_generators() const { return allow_generators_; }
- bool allow_for_of() const { return allow_for_of_; }
- bool allow_harmony_numeric_literals() {
- return scanner().HarmonyNumericLiterals();
- }
-
- void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
- void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
- void set_allow_modules(bool allow) { scanner().SetHarmonyModules(allow); }
- void set_allow_harmony_scoping(bool allow) {
- scanner().SetHarmonyScoping(allow);
- }
- void set_allow_generators(bool allow) { allow_generators_ = allow; }
- void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
- void set_allow_harmony_numeric_literals(bool allow) {
- scanner().SetHarmonyNumericLiterals(allow);
- }
-
// Parses the source code represented by the compilation info and sets its
// function literal. Returns false (and deallocates any allocated AST
// nodes) if parsing failed.
static bool Parse(CompilationInfo* info) { return Parser(info).Parse(); }
bool Parse();
- // Returns NULL if parsing failed.
- FunctionLiteral* ParseProgram();
-
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<const char*> args);
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<Handle<String> > args);
-
private:
static const int kMaxNumFunctionLocals = 131071; // 2^17-1
@@ -568,6 +536,9 @@ class Parser BASE_EMBEDDED {
Mode old_mode_;
};
+ // Returns NULL if parsing failed.
+ FunctionLiteral* ParseProgram();
+
FunctionLiteral* ParseLazy();
FunctionLiteral* ParseLazy(Utf16CharacterStream* source);
@@ -584,6 +555,15 @@ class Parser BASE_EMBEDDED {
void ReportInvalidPreparseData(Handle<String> name, bool* ok);
void ReportMessage(const char* message, Vector<const char*> args);
void ReportMessage(const char* message, Vector<Handle<String> > args);
+ void ReportMessageAt(Scanner::Location location, const char* type) {
+ ReportMessageAt(location, type, Vector<const char*>::empty());
+ }
+ void ReportMessageAt(Scanner::Location loc,
+ const char* message,
+ Vector<const char*> args);
+ void ReportMessageAt(Scanner::Location loc,
+ const char* message,
+ Vector<Handle<String> > args);
void set_pre_parse_data(ScriptDataImpl *data) {
pre_parse_data_ = data;
@@ -671,7 +651,6 @@ class Parser BASE_EMBEDDED {
Expression* ParsePrimaryExpression(bool* ok);
Expression* ParseArrayLiteral(bool* ok);
Expression* ParseObjectLiteral(bool* ok);
- ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
// Populate the constant properties fixed array for a materialized object
@@ -711,40 +690,10 @@ class Parser BASE_EMBEDDED {
// Magical syntax support.
Expression* ParseV8Intrinsic(bool* ok);
- INLINE(Token::Value peek()) {
- if (stack_overflow_) return Token::ILLEGAL;
- return scanner().peek();
- }
-
- INLINE(Token::Value Next()) {
- // BUG 1215673: Find a thread safe way to set a stack limit in
- // pre-parse mode. Otherwise, we cannot safely pre-parse from other
- // threads.
- if (stack_overflow_) {
- return Token::ILLEGAL;
- }
- if (StackLimitCheck(isolate()).HasOverflowed()) {
- // Any further calls to Next or peek will return the illegal token.
- // The current call must return the next token, which might already
- // have been peek'ed.
- stack_overflow_ = true;
- }
- return scanner().Next();
- }
-
bool is_generator() const { return current_function_state_->is_generator(); }
bool CheckInOrOf(bool accept_OF, ForEachStatement::VisitMode* visit_mode);
- bool peek_any_identifier();
-
- INLINE(void Consume(Token::Value token));
- void Expect(Token::Value token, bool* ok);
- bool Check(Token::Value token);
- void ExpectSemicolon(bool* ok);
- bool CheckContextualKeyword(Vector<const char> keyword);
- void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
-
Handle<String> LiteralString(PretenureFlag tenured) {
if (scanner().is_literal_ascii()) {
return isolate_->factory()->NewStringFromAscii(
@@ -768,8 +717,8 @@ class Parser BASE_EMBEDDED {
Handle<String> GetSymbol();
// Get odd-ball literals.
- Literal* GetLiteralUndefined();
- Literal* GetLiteralTheHole();
+ Literal* GetLiteralUndefined(int position);
+ Literal* GetLiteralTheHole(int position);
Handle<String> ParseIdentifier(bool* ok);
Handle<String> ParseIdentifierOrStrictReservedWord(
@@ -789,9 +738,6 @@ class Parser BASE_EMBEDDED {
const char* error,
bool* ok);
- // Strict mode octal literal validation.
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
-
// For harmony block scoping mode: Check if the scope has conflicting var/let
// declarations from different scopes. It covers for example
//
@@ -842,7 +788,7 @@ class Parser BASE_EMBEDDED {
Handle<String> type,
Vector< Handle<Object> > arguments);
- preparser::PreParser::PreParseResult LazyParseFunctionLiteral(
+ PreParser::PreParseResult LazyParseFunctionLiteral(
SingletonLogger* logger);
AstNodeFactory<AstConstructionVisitor>* factory() {
@@ -854,7 +800,7 @@ class Parser BASE_EMBEDDED {
Handle<Script> script_;
Scanner scanner_;
- preparser::PreParser* reusable_preparser_;
+ PreParser* reusable_preparser_;
Scope* top_scope_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
FunctionState* current_function_state_;
@@ -864,11 +810,6 @@ class Parser BASE_EMBEDDED {
FuncNameInferrer* fni_;
Mode mode_;
- bool allow_natives_syntax_;
- bool allow_lazy_;
- bool allow_generators_;
- bool allow_for_of_;
- bool stack_overflow_;
// If true, the next (and immediately following) function literal is
// preceded by a parenthesis.
// Heuristically that means that the function will be called immediately,
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index 4d3b1e313e..0076d567f8 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -41,7 +41,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "simulator.h"
#include "v8threads.h"
@@ -88,11 +87,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -205,12 +199,6 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // Not supported on Cygwin.
- return 0;
-}
-
-
// The VirtualMemory implementation is taken from platform-win32.cc.
// The mmap-based virtual memory implementation as it is used on most posix
// platforms does not work well because Cygwin does not support MAP_FIXED.
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index d81827805a..103fd6ce05 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -43,7 +43,6 @@
#include <sys/fcntl.h> // open
#include <unistd.h> // getpagesize
// If you don't have execinfo.h then you need devel/libexecinfo from ports.
-#include <execinfo.h> // backtrace, backtrace_symbols
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
@@ -54,7 +53,6 @@
#include "v8.h"
#include "v8threads.h"
-#include "platform-posix.h"
#include "platform.h"
#include "vm-state-inl.h"
@@ -97,11 +95,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -199,10 +192,6 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-}
-
// Constants used for mmap.
static const int kMmapFd = -1;
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index b8b96025e1..eb2d10b3f9 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -38,11 +38,6 @@
#include <sys/types.h>
#include <stdlib.h>
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
-#include <execinfo.h>
-#include <cxxabi.h>
-#endif
-
// Ubuntu Dapper requires memory pages to be marked as
// executable. Otherwise, OS raises an exception when executing code
// in that page.
@@ -66,7 +61,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -154,14 +148,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- // backtrace is a glibc extension.
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
- POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
-#endif
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -313,16 +299,6 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
- return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-#else
- return 0;
-#endif
-}
-
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 67cc96f937..5ffc3fc54c 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -53,27 +53,15 @@
#include <stdlib.h>
#include <string.h>
#include <errno.h>
-#include <cxxabi.h>
#undef MAP_TYPE
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "simulator.h"
#include "vm-state-inl.h"
-// Manually define these here as weak imports, rather than including execinfo.h.
-// This lets us launch on 10.4 which does not have these calls.
-extern "C" {
- extern int backtrace(void**, int) __attribute__((weak_import));
- extern char** backtrace_symbols(void* const*, int)
- __attribute__((weak_import));
- extern void backtrace_symbols_fd(void* const*, int, int)
- __attribute__((weak_import));
-}
-
namespace v8 {
namespace internal {
@@ -107,14 +95,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
- if (backtrace == NULL) return;
-
- POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -220,14 +200,6 @@ double OS::LocalTimeOffset() {
}
-int OS::StackWalk(Vector<StackFrame> frames) {
- // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
- if (backtrace == NULL) return 0;
-
- return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-}
-
-
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index 30a484f4b3..710c3904af 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -42,7 +42,6 @@
#include <sys/stat.h> // open
#include <fcntl.h> // open
#include <unistd.h> // sysconf
-#include <execinfo.h> // backtrace, backtrace_symbols
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
@@ -51,7 +50,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -96,11 +94,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -231,34 +224,6 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-}
-
// Constants used for mmap.
static const int kMmapFd = -1;
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index fe27eaf71f..797557d76f 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -29,8 +29,6 @@
// own but contains the parts which are the same across POSIX platforms Linux,
// Mac OS, FreeBSD and OpenBSD.
-#include "platform-posix.h"
-
#include <dlfcn.h>
#include <pthread.h>
#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
@@ -102,6 +100,48 @@ intptr_t OS::MaxVirtualMemory() {
}
+uint64_t OS::TotalPhysicalMemory() {
+#if V8_OS_MACOSX
+ int mib[2];
+ mib[0] = CTL_HW;
+ mib[1] = HW_MEMSIZE;
+ int64_t size = 0;
+ size_t len = sizeof(size);
+ if (sysctl(mib, 2, &size, &len, NULL, 0) != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(size);
+#elif V8_OS_FREEBSD
+ int pages, page_size;
+ size_t size = sizeof(pages);
+ sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
+ sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
+ if (pages == -1 || page_size == -1) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(pages) * page_size;
+#elif V8_OS_CYGWIN
+ MEMORYSTATUS memory_info;
+ memory_info.dwLength = sizeof(memory_info);
+ if (!GlobalMemoryStatus(&memory_info)) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(memory_info.dwTotalPhys);
+#else
+ intptr_t pages = sysconf(_SC_PHYS_PAGES);
+ intptr_t page_size = sysconf(_SC_PAGESIZE);
+ if (pages == -1 || page_size == -1) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(pages) * page_size;
+#endif
+}
+
+
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
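
OS::TotalPhysicalMemory() is new in this release; every branch reduces to either an OS memory query or pages times page size. Its evident purpose is to let V8 scale heap defaults to the machine it runs on. A purely illustrative caller (the function name and thresholds are invented for this sketch; set_max_old_space_size is the existing v8::ResourceConstraints setter, in MB):

    #include "platform.h"   // v8::internal::OS
    #include "v8.h"         // v8::ResourceConstraints

    // Sketch: pick an old-space limit from the amount of physical RAM.
    void ConfigureHeapForPhysicalMemory(v8::ResourceConstraints* constraints) {
      const uint64_t kMB = 1024 * 1024;
      uint64_t physical = v8::internal::OS::TotalPhysicalMemory();
      if (physical <= 512 * kMB) {
        constraints->set_max_old_space_size(128);
      } else if (physical <= 2048 * kMB) {
        constraints->set_max_old_space_size(512);
      } else {
        constraints->set_max_old_space_size(1024);
      }
    }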
diff --git a/deps/v8/src/platform-posix.h b/deps/v8/src/platform-posix.h
deleted file mode 100644
index 6b73387cd7..0000000000
--- a/deps/v8/src/platform-posix.h
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_POSIX_H_
-#define V8_PLATFORM_POSIX_H_
-
-#if !defined(ANDROID)
-#include <cxxabi.h>
-#endif
-#include <stdio.h>
-
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-// Used by platform implementation files during OS::DumpBacktrace()
-// and OS::StackWalk().
-template<int (*backtrace)(void**, int),
- char** (*backtrace_symbols)(void* const*, int)>
-struct POSIXBacktraceHelper {
- static void DumpBacktrace() {
- void* trace[100];
- int size = backtrace(trace, ARRAY_SIZE(trace));
- char** symbols = backtrace_symbols(trace, size);
- fprintf(stderr, "\n==== C stack trace ===============================\n\n");
- if (size == 0) {
- fprintf(stderr, "(empty)\n");
- } else if (symbols == NULL) {
- fprintf(stderr, "(no symbols)\n");
- } else {
- for (int i = 1; i < size; ++i) {
- fprintf(stderr, "%2d: ", i);
- char mangled[201];
- if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {// NOLINT
- char* demangled = NULL;
-#if !defined(ANDROID)
- int status;
- size_t length;
- demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
-#endif
- fprintf(stderr, "%s\n", demangled != NULL ? demangled : mangled);
- free(demangled);
- } else {
- fprintf(stderr, "??\n");
- }
- }
- }
- fflush(stderr);
- free(symbols);
- }
-
- static int StackWalk(Vector<OS::StackFrame> frames) {
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return OS::kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- OS::SNPrintF(MutableCStrVector(frames[i].text, OS::kStackWalkMaxTextLen),
- "%s", symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_POSIX_H_
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index f082af1254..a0590cbecb 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -51,7 +51,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -112,11 +111,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -211,20 +205,6 @@ static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- ucontext_t ctx;
- struct StackWalker walker = { frames, 0 };
-
- if (getcontext(&ctx) < 0) return kStackWalkError;
-
- if (!walkcontext(&ctx, StackWalkCallback, &walker)) {
- return kStackWalkError;
- }
-
- return walker.index;
-}
-
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index ea4f7ea11f..35411bfdad 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -240,12 +240,16 @@ void MathSetup() {
class Win32Time {
public:
// Constructors.
+ Win32Time();
explicit Win32Time(double jstime);
Win32Time(int year, int mon, int day, int hour, int min, int sec);
// Convert timestamp to JavaScript representation.
double ToJSTime();
+ // Set timestamp to current time.
+ void SetToCurrentTime();
+
// Returns the local timezone offset in milliseconds east of UTC. This is
// the number of milliseconds you must add to UTC to get local time, i.e.
// LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
@@ -314,6 +318,12 @@ char Win32Time::std_tz_name_[kTzNameSize];
char Win32Time::dst_tz_name_[kTzNameSize];
+// Initialize timestamp to the start of the epoch.
+Win32Time::Win32Time() {
+ t() = 0;
+}
+
+
// Initialize timestamp from a JavaScript timestamp.
Win32Time::Win32Time(double jstime) {
t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
@@ -340,6 +350,62 @@ double Win32Time::ToJSTime() {
}
+// Set timestamp to current time.
+void Win32Time::SetToCurrentTime() {
+ // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
+ // Because we're fast, we want timers with at least 1ms resolution.
+ //
+ // timeGetTime() provides 1ms granularity when combined with
+ // timeBeginPeriod(). If the host application for v8 wants fast
+ // timers, it can use timeBeginPeriod to increase the resolution.
+ //
+ // Using timeGetTime() has a drawback: it is a 32-bit value
+ // and hence rolls over every ~49 days.
+ //
+ // To use the clock, we use GetSystemTimeAsFileTime as our base;
+ // and then use timeGetTime to extrapolate current time from the
+ // start time. To deal with rollovers, we resync the clock
+ // any time when more than kMaxClockElapsedTime has passed or
+ // whenever timeGetTime creates a rollover.
+
+ static bool initialized = false;
+ static TimeStamp init_time;
+ static DWORD init_ticks;
+ static const int64_t kHundredNanosecondsPerSecond = 10000000;
+ static const int64_t kMaxClockElapsedTime =
+ 60*kHundredNanosecondsPerSecond; // 1 minute
+
+ // If we are uninitialized, we need to resync the clock.
+ bool needs_resync = !initialized;
+
+ // Get the current time.
+ TimeStamp time_now;
+ GetSystemTimeAsFileTime(&time_now.ft_);
+ DWORD ticks_now = timeGetTime();
+
+ // Check if we need to resync due to clock rollover.
+ needs_resync |= ticks_now < init_ticks;
+
+ // Check if we need to resync due to elapsed time.
+ needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
+
+ // Check if we need to resync due to backwards time change.
+ needs_resync |= time_now.t_ < init_time.t_;
+
+ // Resync the clock if necessary.
+ if (needs_resync) {
+ GetSystemTimeAsFileTime(&init_time.ft_);
+ init_ticks = ticks_now = timeGetTime();
+ initialized = true;
+ }
+
+ // Finally, compute the actual time.
+ DWORD elapsed = ticks_now - init_ticks;
+ this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
+}
+
+
// Guess the name of the timezone from the bias.
// The guess is very biased towards the northern hemisphere.
const char* Win32Time::GuessTimezoneNameFromBias(int bias) {
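
Once the base pair (init_time, init_ticks) is fixed, the extrapolation above is one line of arithmetic: elapsed timeGetTime() milliseconds converted to 100ns FILETIME units (1 ms = 10000 units) and added to the FILETIME base. As a standalone function:

    // Sketch of the extrapolation step. The rollover check above resyncs
    // whenever ticks_now < init_ticks, so elapsed_ms here is always a
    // small forward delta.
    int64_t ExtrapolateFileTime(int64_t init_time_100ns,
                                DWORD init_ticks_ms,
                                DWORD ticks_now_ms) {
      DWORD elapsed_ms = ticks_now_ms - init_ticks_ms;
      return init_time_100ns + static_cast<int64_t>(elapsed_ms) * 10000;
    }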
@@ -891,11 +957,6 @@ void OS::DebugBreak() {
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
public:
Win32MemoryMappedFile(HANDLE file,
@@ -1208,133 +1269,21 @@ void OS::SignalCodeMovingGC() {
}
-// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
-
-// Switch off warning 4748 (/GS can not protect parameters and local variables
-// from local buffer overrun because optimizations are disabled in function) as
-// it is triggered by the use of inline assembler.
-#pragma warning(push)
-#pragma warning(disable : 4748)
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- BOOL ok;
-
- // Load the required functions from DLL's.
- if (!LoadDbgHelpAndTlHelp32()) return kStackWalkError;
-
- // Get the process and thread handles.
- HANDLE process_handle = GetCurrentProcess();
- HANDLE thread_handle = GetCurrentThread();
-
- // Read the symbols.
- if (!LoadSymbols(Isolate::Current(), process_handle)) return kStackWalkError;
-
- // Capture current context.
- CONTEXT context;
- RtlCaptureContext(&context);
-
- // Initialize the stack walking
- STACKFRAME64 stack_frame;
- memset(&stack_frame, 0, sizeof(stack_frame));
-#ifdef _WIN64
- stack_frame.AddrPC.Offset = context.Rip;
- stack_frame.AddrFrame.Offset = context.Rbp;
- stack_frame.AddrStack.Offset = context.Rsp;
-#else
- stack_frame.AddrPC.Offset = context.Eip;
- stack_frame.AddrFrame.Offset = context.Ebp;
- stack_frame.AddrStack.Offset = context.Esp;
-#endif
- stack_frame.AddrPC.Mode = AddrModeFlat;
- stack_frame.AddrFrame.Mode = AddrModeFlat;
- stack_frame.AddrStack.Mode = AddrModeFlat;
- int frames_count = 0;
-
- // Collect stack frames.
- int frames_size = frames.length();
- while (frames_count < frames_size) {
- ok = _StackWalk64(
- IMAGE_FILE_MACHINE_I386, // MachineType
- process_handle, // hProcess
- thread_handle, // hThread
- &stack_frame, // StackFrame
- &context, // ContextRecord
- NULL, // ReadMemoryRoutine
- _SymFunctionTableAccess64, // FunctionTableAccessRoutine
- _SymGetModuleBase64, // GetModuleBaseRoutine
- NULL); // TranslateAddress
- if (!ok) break;
-
- // Store the address.
- ASSERT((stack_frame.AddrPC.Offset >> 32) == 0); // 32-bit address.
- frames[frames_count].address =
- reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
-
- // Try to locate a symbol for this frame.
- DWORD64 symbol_displacement;
- SmartArrayPointer<IMAGEHLP_SYMBOL64> symbol(
- NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen));
- if (symbol.is_empty()) return kStackWalkError; // Out of memory.
- memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
- (*symbol)->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
- (*symbol)->MaxNameLength = kStackWalkMaxNameLen;
- ok = _SymGetSymFromAddr64(process_handle, // hProcess
- stack_frame.AddrPC.Offset, // Address
- &symbol_displacement, // Displacement
- *symbol); // Symbol
- if (ok) {
- // Try to locate more source information for the symbol.
- IMAGEHLP_LINE64 Line;
- memset(&Line, 0, sizeof(Line));
- Line.SizeOfStruct = sizeof(Line);
- DWORD line_displacement;
- ok = _SymGetLineFromAddr64(
- process_handle, // hProcess
- stack_frame.AddrPC.Offset, // dwAddr
- &line_displacement, // pdwDisplacement
- &Line); // Line
- // Format a text representation of the frame based on the information
- // available.
- if (ok) {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s %s:%d:%d",
- (*symbol)->Name, Line.FileName, Line.LineNumber,
- line_displacement);
- } else {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s",
- (*symbol)->Name);
- }
- // Make sure line termination is in place.
- frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0';
- } else {
- // No text representation of this frame
- frames[frames_count].text[0] = '\0';
-
- // Continue if we are just missing a module (for non C/C++ frames a
- // module will never be found).
- int err = GetLastError();
- if (err != ERROR_MOD_NOT_FOUND) {
- break;
- }
- }
-
- frames_count++;
+uint64_t OS::TotalPhysicalMemory() {
+ MEMORYSTATUSEX memory_info;
+ memory_info.dwLength = sizeof(memory_info);
+ if (!GlobalMemoryStatusEx(&memory_info)) {
+ UNREACHABLE();
+ return 0;
}
- // Return the number of frames filled in.
- return frames_count;
+ return static_cast<uint64_t>(memory_info.ullTotalPhys);
}
-// Restore warnings to previous settings.
-#pragma warning(pop)
-
#else // __MINGW32__
void OS::LogSharedLibraryAddresses(Isolate* isolate) { }
void OS::SignalCodeMovingGC() { }
-int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
#endif // __MINGW32__
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index ee8fb92910..8e524aeaf0 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -67,6 +67,8 @@ int signbit(double x);
int strncasecmp(const char* s1, const char* s2, int n);
+// Visual C++ 2013 and higher implement this function.
+#if (_MSC_VER < 1800)
inline int lrint(double flt) {
int intgr;
#if V8_TARGET_ARCH_IA32
@@ -84,6 +86,8 @@ inline int lrint(double flt) {
return intgr;
}
+#endif // _MSC_VER < 1800
+
#endif // V8_CC_MSVC
namespace v8 {
@@ -252,9 +256,6 @@ class OS {
// Debug break.
static void DebugBreak();
- // Dump C++ current stack trace (only functional on Linux).
- static void DumpBacktrace();
-
// Walk the stack.
static const int kStackWalkError = -1;
static const int kStackWalkMaxNameLen = 256;
@@ -264,8 +265,6 @@ class OS {
char text[kStackWalkMaxTextLen];
};
- static int StackWalk(Vector<StackFrame> frames);
-
class MemoryMappedFile {
public:
static MemoryMappedFile* open(const char* name);
@@ -303,6 +302,9 @@ class OS {
// positions indicated by the members of the CpuFeature enum from globals.h
static uint64_t CpuFeaturesImpliedByPlatform();
+ // The total amount of physical memory available on the current system.
+ static uint64_t TotalPhysicalMemory();
+
// Maximum size of the virtual memory. 0 means there is no artificial
// limit.
static intptr_t MaxVirtualMemory();
diff --git a/deps/v8/src/platform/elapsed-timer.h b/deps/v8/src/platform/elapsed-timer.h
index 2311db2f52..b61b007605 100644
--- a/deps/v8/src/platform/elapsed-timer.h
+++ b/deps/v8/src/platform/elapsed-timer.h
@@ -28,8 +28,8 @@
#ifndef V8_PLATFORM_ELAPSED_TIMER_H_
#define V8_PLATFORM_ELAPSED_TIMER_H_
-#include "checks.h"
-#include "platform/time.h"
+#include "../checks.h"
+#include "time.h"
namespace v8 {
namespace internal {
@@ -104,7 +104,7 @@ class ElapsedTimer V8_FINAL BASE_EMBEDDED {
private:
static V8_INLINE TimeTicks Now() {
- TimeTicks now = TimeTicks::HighResNow();
+ TimeTicks now = TimeTicks::HighResolutionNow();
ASSERT(!now.IsNull());
return now;
}
diff --git a/deps/v8/src/platform/mutex.h b/deps/v8/src/platform/mutex.h
index 0f899ca597..125e9d4860 100644
--- a/deps/v8/src/platform/mutex.h
+++ b/deps/v8/src/platform/mutex.h
@@ -28,9 +28,9 @@
#ifndef V8_PLATFORM_MUTEX_H_
#define V8_PLATFORM_MUTEX_H_
-#include "lazy-instance.h"
+#include "../lazy-instance.h"
#if V8_OS_WIN
-#include "win32-headers.h"
+#include "../win32-headers.h"
#endif
#if V8_OS_POSIX
diff --git a/deps/v8/src/platform/semaphore.h b/deps/v8/src/platform/semaphore.h
index 2cfa142111..0babe5fd65 100644
--- a/deps/v8/src/platform/semaphore.h
+++ b/deps/v8/src/platform/semaphore.h
@@ -28,9 +28,9 @@
#ifndef V8_PLATFORM_SEMAPHORE_H_
#define V8_PLATFORM_SEMAPHORE_H_
-#include "lazy-instance.h"
+#include "../lazy-instance.h"
#if V8_OS_WIN
-#include "win32-headers.h"
+#include "../win32-headers.h"
#endif
#if V8_OS_MACOSX
diff --git a/deps/v8/src/platform/time.cc b/deps/v8/src/platform/time.cc
index ea6dd2c0ba..de0ca16473 100644
--- a/deps/v8/src/platform/time.cc
+++ b/deps/v8/src/platform/time.cc
@@ -43,13 +43,6 @@
#include "win32-headers.h"
#endif
-#if V8_OS_WIN
-// Prototype for GetTickCount64() procedure.
-extern "C" {
-typedef ULONGLONG (WINAPI *GETTICKCOUNT64PROC)(void);
-}
-#endif
-
namespace v8 {
namespace internal {
@@ -175,43 +168,43 @@ struct timespec TimeDelta::ToTimespec() const {
// periodically resync the internal clock to the system clock.
class Clock V8_FINAL {
public:
- Clock() : initial_time_(CurrentWallclockTime()),
- initial_ticks_(TimeTicks::Now()) {}
+ Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
Time Now() {
- // This must be executed under lock.
- LockGuard<Mutex> lock_guard(&mutex_);
+ // Time between resampling the un-granular clock for this API (1 minute).
+ const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
- // Calculate the time elapsed since we started our timer.
- TimeDelta elapsed = TimeTicks::Now() - initial_ticks_;
+ LockGuard<Mutex> lock_guard(&mutex_);
- // Check if we don't need to synchronize with the wallclock yet.
- if (elapsed.InMicroseconds() <= kMaxMicrosecondsToAvoidDrift) {
- return initial_time_ + elapsed;
+ // Determine current time and ticks.
+ TimeTicks ticks = GetSystemTicks();
+ Time time = GetSystemTime();
+
+ // Check if we need to synchronize with the system clock due to a backwards
+ // time change or the amount of time elapsed.
+ TimeDelta elapsed = ticks - initial_ticks_;
+ if (time < initial_time_ || elapsed > kMaxElapsedTime) {
+ initial_ticks_ = ticks;
+ initial_time_ = time;
+ return time;
}
- // Resynchronize with the wallclock.
- initial_ticks_ = TimeTicks::Now();
- initial_time_ = CurrentWallclockTime();
- return initial_time_;
+ return initial_time_ + elapsed;
}
Time NowFromSystemTime() {
- // This must be executed under lock.
LockGuard<Mutex> lock_guard(&mutex_);
-
- // Resynchronize with the wallclock.
- initial_ticks_ = TimeTicks::Now();
- initial_time_ = CurrentWallclockTime();
+ initial_ticks_ = GetSystemTicks();
+ initial_time_ = GetSystemTime();
return initial_time_;
}
private:
- // Time between resampling the un-granular clock for this API (1 minute).
- static const int64_t kMaxMicrosecondsToAvoidDrift =
- Time::kMicrosecondsPerMinute;
+ static TimeTicks GetSystemTicks() {
+ return TimeTicks::Now();
+ }
- static Time CurrentWallclockTime() {
+ static Time GetSystemTime() {
FILETIME ft;
::GetSystemTimeAsFileTime(&ft);
return Time::FromFiletime(ft);
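
The rewritten Clock keeps both entry points but makes the fast path explicit: Now() extrapolates from the cached base and only resamples the system clock when too much time has elapsed, while NowFromSystemTime() always resamples. It also adds a trigger the old code lacked -- a backwards system-clock change (time < initial_time_) now forces a resync instead of silently extending the stale base.

    // Sketch of the trade-off at the call sites:
    Time fast  = clock.Pointer()->Now();                // usually extrapolated
    Time exact = clock.Pointer()->NowFromSystemTime();  // always resamples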
@@ -223,9 +216,9 @@ class Clock V8_FINAL {
};
-static LazyDynamicInstance<Clock,
- DefaultCreateTrait<Clock>,
- ThreadSafeInitOnceTrait>::type clock = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+static LazyStaticInstance<Clock,
+ DefaultConstructTrait<Clock>,
+ ThreadSafeInitOnceTrait>::type clock = LAZY_STATIC_INSTANCE_INITIALIZER;
Time Time::Now() {
@@ -388,6 +381,7 @@ class TickClock {
public:
virtual ~TickClock() {}
virtual int64_t Now() = 0;
+ virtual bool IsHighResolution() = 0;
};
@@ -440,42 +434,24 @@ class HighResolutionTickClock V8_FINAL : public TickClock {
int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
- // Make sure we never return 0 here, so that TimeTicks::HighResNow()
+ // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
// will never return 0.
return ticks + 1;
}
- private:
- int64_t ticks_per_second_;
-};
-
-
-// The GetTickCount64() API is what we actually want for the regular tick
-// clock, but this is only available starting with Windows Vista.
-class WindowsVistaTickClock V8_FINAL : public TickClock {
- public:
- explicit WindowsVistaTickClock(GETTICKCOUNT64PROC func) : func_(func) {
- ASSERT(func_ != NULL);
- }
- virtual ~WindowsVistaTickClock() {}
-
- virtual int64_t Now() V8_OVERRIDE {
- // Query the current ticks (in ms).
- ULONGLONG tick_count_ms = (*func_)();
-
- // Convert to microseconds (make sure to never return 0 here).
- return (tick_count_ms * Time::kMicrosecondsPerMillisecond) + 1;
+ virtual bool IsHighResolution() V8_OVERRIDE {
+ return true;
}
private:
- GETTICKCOUNT64PROC func_;
+ int64_t ticks_per_second_;
};
class RolloverProtectedTickClock V8_FINAL : public TickClock {
public:
// We initialize rollover_ms_ to 1 to ensure that we will never
- // return 0 from TimeTicks::HighResNow() and TimeTicks::Now() below.
+ // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
virtual ~RolloverProtectedTickClock() {}
@@ -487,6 +463,9 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock {
// Note that we do not use GetTickCount() here, since timeGetTime() gives
// more predictable delta values, as described here:
// http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
+ // timeGetTime() provides 1ms granularity when combined with
+ // timeBeginPeriod(). If the host application for V8 wants fast timers, it
+ // can use timeBeginPeriod() to increase the resolution.
DWORD now = timeGetTime();
if (now < last_seen_now_) {
rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days.
@@ -495,6 +474,10 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock {
return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
}
+ virtual bool IsHighResolution() V8_OVERRIDE {
+ return false;
+ }
+
private:
Mutex mutex_;
DWORD last_seen_now_;
@@ -502,27 +485,10 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock {
};
-struct CreateTickClockTrait {
- static TickClock* Create() {
- // Try to load GetTickCount64() from kernel32.dll (available since Vista).
- HMODULE kernel32 = ::GetModuleHandleA("kernel32.dll");
- ASSERT(kernel32 != NULL);
- FARPROC proc = ::GetProcAddress(kernel32, "GetTickCount64");
- if (proc != NULL) {
- return new WindowsVistaTickClock(
- reinterpret_cast<GETTICKCOUNT64PROC>(proc));
- }
-
- // Fallback to the rollover protected tick clock.
- return new RolloverProtectedTickClock;
- }
-};
-
-
-static LazyDynamicInstance<TickClock,
- CreateTickClockTrait,
+static LazyStaticInstance<RolloverProtectedTickClock,
+ DefaultConstructTrait<RolloverProtectedTickClock>,
ThreadSafeInitOnceTrait>::type tick_clock =
- LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+ LAZY_STATIC_INSTANCE_INITIALIZER;
struct CreateHighResTickClockTrait {
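
With the Vista-only GetTickCount64 path gone there is exactly one fallback clock, so the lazy instance no longer needs a creation trait or a heap allocation: LazyStaticInstance constructs the RolloverProtectedTickClock in place, in static storage, thread-safely on first use. Call sites are unchanged:

    // Sketch: the first Pointer() call constructs the clock in place;
    // later calls are a plain load.
    int64_t microseconds = tick_clock.Pointer()->Now();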
@@ -560,21 +526,27 @@ TimeTicks TimeTicks::Now() {
}
-TimeTicks TimeTicks::HighResNow() {
+TimeTicks TimeTicks::HighResolutionNow() {
// Make sure we never return 0 here.
TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
ASSERT(!ticks.IsNull());
return ticks;
}
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+ return high_res_tick_clock.Pointer()->IsHighResolution();
+}
+
#else // V8_OS_WIN
TimeTicks TimeTicks::Now() {
- return HighResNow();
+ return HighResolutionNow();
}
-TimeTicks TimeTicks::HighResNow() {
+TimeTicks TimeTicks::HighResolutionNow() {
int64_t ticks;
#if V8_OS_MACOSX
static struct mach_timebase_info info;
@@ -608,6 +580,12 @@ TimeTicks TimeTicks::HighResNow() {
return TimeTicks(ticks + 1);
}
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+ return true;
+}
+
#endif // V8_OS_WIN
} } // namespace v8::internal
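
A usage sketch of the renamed API (hypothetical caller, not part of this change): code that needs sub-millisecond timing can first ask whether the clock actually delivers it.

extern void DoWork();  // Assumed workload.

void TimedSection() {
  using v8::internal::TimeTicks;
  bool precise = TimeTicks::IsHighResolutionClockWorking();
  TimeTicks start = TimeTicks::HighResolutionNow();
  DoWork();
  int64_t elapsed_us = (TimeTicks::HighResolutionNow() - start).InMicroseconds();
  // When 'precise' is false (Windows without a usable QPC), elapsed_us
  // only has timeGetTime()-level (~1 ms) granularity.
}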
diff --git a/deps/v8/src/platform/time.h b/deps/v8/src/platform/time.h
index 2ce6cdd3e9..877e0203bb 100644
--- a/deps/v8/src/platform/time.h
+++ b/deps/v8/src/platform/time.h
@@ -31,7 +31,7 @@
#include <ctime>
#include <limits>
-#include "allocation.h"
+#include "../allocation.h"
// Forward declarations.
extern "C" {
@@ -333,7 +333,10 @@ class TimeTicks V8_FINAL BASE_EMBEDDED {
// resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
// SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
// This method never returns a null TimeTicks.
- static TimeTicks HighResNow();
+ static TimeTicks HighResolutionNow();
+
+ // Returns true if the high-resolution clock is working on this system.
+ static bool IsHighResolutionClockWorking();
// Returns true if this object has not been initialized.
bool IsNull() const { return ticks_ == 0; }
diff --git a/deps/v8/src/preparser-api.cc b/deps/v8/src/preparser-api.cc
deleted file mode 100644
index 462dfe2290..0000000000
--- a/deps/v8/src/preparser-api.cc
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef _MSC_VER
-#define V8_WIN32_LEAN_AND_MEAN
-#include "win32-headers.h"
-#endif
-
-#include "../include/v8-preparser.h"
-
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list.h"
-#include "hashmap.h"
-#include "preparse-data-format.h"
-#include "preparse-data.h"
-#include "preparser.h"
-
-namespace v8 {
-namespace internal {
-
-// UTF16Buffer based on a v8::UnicodeInputStream.
-class InputStreamUtf16Buffer : public Utf16CharacterStream {
- public:
- /* The InputStreamUtf16Buffer maintains an internal buffer
- * that is filled in chunks from the Utf16CharacterStream.
- * It also maintains unlimited pushback capability, but optimized
- * for small pushbacks.
- * The pushback_buffer_ pointer points to the limit of pushbacks
- * in the current buffer. There is room for a few pushback'ed chars before
- * the buffer containing the most recently read chunk. If this is overflowed,
- * an external buffer is allocated/reused to hold further pushbacks, and
- * pushback_buffer_ and buffer_cursor_/buffer_end_ now points to the
- * new buffer. When this buffer is read to the end again, the cursor is
- * switched back to the internal buffer
- */
- explicit InputStreamUtf16Buffer(v8::UnicodeInputStream* stream)
- : Utf16CharacterStream(),
- stream_(stream),
- pushback_buffer_(buffer_),
- pushback_buffer_end_cache_(NULL),
- pushback_buffer_backing_(NULL),
- pushback_buffer_backing_size_(0) {
- buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize;
- }
-
- virtual ~InputStreamUtf16Buffer() {
- if (pushback_buffer_backing_ != NULL) {
- DeleteArray(pushback_buffer_backing_);
- }
- }
-
- virtual void PushBack(uc32 ch) {
- ASSERT(pos_ > 0);
- if (ch == kEndOfInput) {
- pos_--;
- return;
- }
- if (buffer_cursor_ <= pushback_buffer_) {
- // No more room in the current buffer to do pushbacks.
- if (pushback_buffer_end_cache_ == NULL) {
- // We have overflowed the pushback space at the beginning of buffer_.
- // Switch to using a separate allocated pushback buffer.
- if (pushback_buffer_backing_ == NULL) {
- // Allocate a buffer the first time we need it.
- pushback_buffer_backing_ = NewArray<uc16>(kPushBackSize);
- pushback_buffer_backing_size_ = kPushBackSize;
- }
- pushback_buffer_ = pushback_buffer_backing_;
- pushback_buffer_end_cache_ = buffer_end_;
- buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
- buffer_cursor_ = buffer_end_ - 1;
- } else {
- // Hit the bottom of the allocated pushback buffer.
- // Double the buffer and continue.
- uc16* new_buffer = NewArray<uc16>(pushback_buffer_backing_size_ * 2);
- OS::MemCopy(new_buffer + pushback_buffer_backing_size_,
- pushback_buffer_backing_,
- pushback_buffer_backing_size_);
- DeleteArray(pushback_buffer_backing_);
- buffer_cursor_ = new_buffer + pushback_buffer_backing_size_;
- pushback_buffer_backing_ = pushback_buffer_ = new_buffer;
- buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
- }
- }
- pushback_buffer_[buffer_cursor_ - pushback_buffer_- 1] =
- static_cast<uc16>(ch);
- pos_--;
- }
-
- protected:
- virtual bool ReadBlock() {
- if (pushback_buffer_end_cache_ != NULL) {
- buffer_cursor_ = buffer_;
- buffer_end_ = pushback_buffer_end_cache_;
- pushback_buffer_end_cache_ = NULL;
- return buffer_end_ > buffer_cursor_;
- }
- // Copy the top of the buffer into the pushback area.
- int32_t value;
- uc16* buffer_start = buffer_ + kPushBackSize;
- buffer_cursor_ = buffer_end_ = buffer_start;
- while ((value = stream_->Next()) >= 0) {
- if (value >
- static_cast<int32_t>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
- buffer_start[buffer_end_++ - buffer_start] =
- unibrow::Utf16::LeadSurrogate(value);
- buffer_start[buffer_end_++ - buffer_start] =
- unibrow::Utf16::TrailSurrogate(value);
- } else {
- // buffer_end_ is a const pointer, but buffer_ is writable.
- buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
- }
- // Stop one before the end of the buffer in case we get a surrogate pair.
- if (buffer_end_ <= buffer_ + 1 + kPushBackSize + kBufferSize) break;
- }
- return buffer_end_ > buffer_start;
- }
-
- virtual unsigned SlowSeekForward(unsigned pos) {
- // Seeking in the input is not used by preparsing.
- // It's only used by the real parser based on preparser data.
- UNIMPLEMENTED();
- return 0;
- }
-
- private:
- static const unsigned kBufferSize = 512;
- static const unsigned kPushBackSize = 16;
- v8::UnicodeInputStream* const stream_;
- // Buffer holding first kPushBackSize characters of pushback buffer,
- // then kBufferSize chars of read-ahead.
- // The pushback buffer is only used if pushing back characters past
- // the start of a block.
- uc16 buffer_[kPushBackSize + kBufferSize];
- // Limit of pushbacks before new allocation is necessary.
- uc16* pushback_buffer_;
- // Only if that pushback buffer at the start of buffer_ isn't sufficient
- // is the following used.
- const uc16* pushback_buffer_end_cache_;
- uc16* pushback_buffer_backing_;
- unsigned pushback_buffer_backing_size_;
-};
-
-} // namespace internal.
-
-
-UnicodeInputStream::~UnicodeInputStream() { }
-
-
-PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
- internal::InputStreamUtf16Buffer buffer(input);
- uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
- internal::UnicodeCache unicode_cache;
- internal::Scanner scanner(&unicode_cache);
- scanner.Initialize(&buffer);
- internal::CompleteParserRecorder recorder;
- preparser::PreParser preparser(&scanner, &recorder, stack_limit);
- preparser.set_allow_lazy(true);
- preparser::PreParser::PreParseResult result = preparser.PreParseProgram();
- if (result == preparser::PreParser::kPreParseStackOverflow) {
- return PreParserData::StackOverflow();
- }
- internal::Vector<unsigned> pre_data = recorder.ExtractData();
- size_t size = pre_data.length() * sizeof(pre_data[0]);
- unsigned char* data = reinterpret_cast<unsigned char*>(pre_data.start());
- return PreParserData(size, data);
-}
-
-} // namespace v8.
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 36a94a3315..a87c434558 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -42,10 +42,10 @@
#include "unicode.h"
#include "utils.h"
-#ifdef _MSC_VER
+#if V8_CC_MSVC && (_MSC_VER < 1800)
namespace std {
-// Usually defined in math.h, but not in MSVC.
+// Usually defined in math.h, but not in MSVC until VS2013+.
// Abstracted to work around the missing declaration on older toolchains.
int isfinite(double value);
@@ -53,28 +53,27 @@ int isfinite(double value);
#endif
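
The guarded declaration above only promises old MSVC that std::isfinite exists; the definition lives elsewhere in V8. On pre-VS2013 toolchains a self-contained shim would typically forward to the CRT (a sketch; _finite is the documented <float.h> routine):

#include <float.h>  // _finite

namespace std {
inline int isfinite(double value) { return _finite(value) != 0; }
}  // namespace std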
namespace v8 {
-
-namespace preparser {
+namespace internal {
PreParser::PreParseResult PreParser::PreParseLazyFunction(
- i::LanguageMode mode, bool is_generator, i::ParserRecorder* log) {
+ LanguageMode mode, bool is_generator, ParserRecorder* log) {
log_ = log;
// Lazy functions always have trivial outer scopes (no with/catch scopes).
Scope top_scope(&scope_, kTopLevelScope);
set_language_mode(mode);
Scope function_scope(&scope_, kFunctionScope);
function_scope.set_is_generator(is_generator);
- ASSERT_EQ(i::Token::LBRACE, scanner_->current_token());
+ ASSERT_EQ(Token::LBRACE, scanner()->current_token());
bool ok = true;
- int start_position = scanner_->peek_location().beg_pos;
+ int start_position = peek_position();
ParseLazyFunctionLiteralBody(&ok);
- if (stack_overflow_) return kPreParseStackOverflow;
+ if (stack_overflow()) return kPreParseStackOverflow;
if (!ok) {
- ReportUnexpectedToken(scanner_->current_token());
+ ReportUnexpectedToken(scanner()->current_token());
} else {
- ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
+ ASSERT_EQ(Token::RBRACE, scanner()->peek());
if (!is_classic_mode()) {
- int end_pos = scanner_->location().end_pos;
+ int end_pos = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_pos, &ok);
if (ok) {
CheckDelayedStrictModeViolation(start_position, end_pos, &ok);
@@ -98,50 +97,38 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-void PreParser::ReportUnexpectedToken(i::Token::Value token) {
+void PreParser::ReportUnexpectedToken(Token::Value token) {
// We don't report stack overflows here, to avoid increasing the
// stack depth even further. Instead we report it after parsing is
// over, in ParseProgram.
- if (token == i::Token::ILLEGAL && stack_overflow_) {
+ if (token == Token::ILLEGAL && stack_overflow()) {
return;
}
- i::Scanner::Location source_location = scanner_->location();
+ Scanner::Location source_location = scanner()->location();
  // Six of the tokens are treated specially
switch (token) {
- case i::Token::EOS:
+ case Token::EOS:
return ReportMessageAt(source_location, "unexpected_eos", NULL);
- case i::Token::NUMBER:
+ case Token::NUMBER:
return ReportMessageAt(source_location, "unexpected_token_number", NULL);
- case i::Token::STRING:
+ case Token::STRING:
return ReportMessageAt(source_location, "unexpected_token_string", NULL);
- case i::Token::IDENTIFIER:
+ case Token::IDENTIFIER:
return ReportMessageAt(source_location,
"unexpected_token_identifier", NULL);
- case i::Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_RESERVED_WORD:
return ReportMessageAt(source_location, "unexpected_reserved", NULL);
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
return ReportMessageAt(source_location,
"unexpected_strict_reserved", NULL);
default:
- const char* name = i::Token::String(token);
+ const char* name = Token::String(token);
ReportMessageAt(source_location, "unexpected_token", name);
}
}
-// Checks whether octal literal last seen is between beg_pos and end_pos.
-// If so, reports an error.
-void PreParser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- i::Scanner::Location octal = scanner_->octal_position();
- if (beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal", NULL);
- scanner_->clear_octal_position();
- *ok = false;
- }
-}
-
-
#define CHECK_OK ok); \
if (!*ok) return kUnknownSourceElements; \
((void)0
@@ -162,10 +149,10 @@ PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
// GeneratorDeclaration
switch (peek()) {
- case i::Token::FUNCTION:
+ case Token::FUNCTION:
return ParseFunctionDeclaration(ok);
- case i::Token::LET:
- case i::Token::CONST:
+ case Token::LET:
+ case Token::CONST:
return ParseVariableStatement(kSourceElement, ok);
default:
return ParseStatement(ok);
@@ -184,7 +171,7 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
if (allow_directive_prologue) {
if (statement.IsUseStrictLiteral()) {
set_language_mode(allow_harmony_scoping() ?
- i::EXTENDED_MODE : i::STRICT_MODE);
+ EXTENDED_MODE : STRICT_MODE);
} else if (!statement.IsStringLiteral()) {
allow_directive_prologue = false;
}
@@ -229,55 +216,55 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
// Keep the source position of the statement
switch (peek()) {
- case i::Token::LBRACE:
+ case Token::LBRACE:
return ParseBlock(ok);
- case i::Token::CONST:
- case i::Token::LET:
- case i::Token::VAR:
+ case Token::CONST:
+ case Token::LET:
+ case Token::VAR:
return ParseVariableStatement(kStatement, ok);
- case i::Token::SEMICOLON:
+ case Token::SEMICOLON:
Next();
return Statement::Default();
- case i::Token::IF:
+ case Token::IF:
return ParseIfStatement(ok);
- case i::Token::DO:
+ case Token::DO:
return ParseDoWhileStatement(ok);
- case i::Token::WHILE:
+ case Token::WHILE:
return ParseWhileStatement(ok);
- case i::Token::FOR:
+ case Token::FOR:
return ParseForStatement(ok);
- case i::Token::CONTINUE:
+ case Token::CONTINUE:
return ParseContinueStatement(ok);
- case i::Token::BREAK:
+ case Token::BREAK:
return ParseBreakStatement(ok);
- case i::Token::RETURN:
+ case Token::RETURN:
return ParseReturnStatement(ok);
- case i::Token::WITH:
+ case Token::WITH:
return ParseWithStatement(ok);
- case i::Token::SWITCH:
+ case Token::SWITCH:
return ParseSwitchStatement(ok);
- case i::Token::THROW:
+ case Token::THROW:
return ParseThrowStatement(ok);
- case i::Token::TRY:
+ case Token::TRY:
return ParseTryStatement(ok);
- case i::Token::FUNCTION: {
- i::Scanner::Location start_location = scanner_->peek_location();
+ case Token::FUNCTION: {
+ Scanner::Location start_location = scanner()->peek_location();
Statement statement = ParseFunctionDeclaration(CHECK_OK);
- i::Scanner::Location end_location = scanner_->location();
+ Scanner::Location end_location = scanner()->location();
if (!is_classic_mode()) {
ReportMessageAt(start_location.beg_pos, end_location.end_pos,
"strict_function", NULL);
@@ -288,7 +275,7 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
}
}
- case i::Token::DEBUGGER:
+ case Token::DEBUGGER:
return ParseDebuggerStatement(ok);
default:
@@ -303,11 +290,11 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
// GeneratorDeclaration ::
// 'function' '*' Identifier '(' FormalParameterListopt ')'
// '{' FunctionBody '}'
- Expect(i::Token::FUNCTION, CHECK_OK);
+ Expect(Token::FUNCTION, CHECK_OK);
- bool is_generator = allow_generators_ && Check(i::Token::MUL);
+ bool is_generator = allow_generators() && Check(Token::MUL);
Identifier identifier = ParseIdentifier(CHECK_OK);
- i::Scanner::Location location = scanner_->location();
+ Scanner::Location location = scanner()->location();
Expression function_value = ParseFunctionLiteral(is_generator, CHECK_OK);
@@ -333,15 +320,15 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
// Note that a Block does not introduce a new execution scope!
// (ECMA-262, 3rd, 12.2)
//
- Expect(i::Token::LBRACE, CHECK_OK);
- while (peek() != i::Token::RBRACE) {
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
if (is_extended_mode()) {
ParseSourceElement(CHECK_OK);
} else {
ParseStatement(CHECK_OK);
}
}
- Expect(i::Token::RBRACE, ok);
+ Expect(Token::RBRACE, ok);
return Statement::Default();
}
@@ -385,9 +372,9 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// ConstBinding ::
// BindingPattern '=' AssignmentExpression
bool require_initializer = false;
- if (peek() == i::Token::VAR) {
- Consume(i::Token::VAR);
- } else if (peek() == i::Token::CONST) {
+ if (peek() == Token::VAR) {
+ Consume(Token::VAR);
+ } else if (peek() == Token::CONST) {
// TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
//
// ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
@@ -398,20 +385,20 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// However disallowing const in classic mode will break compatibility with
// existing pages. Therefore we keep allowing const with the old
// non-harmony semantics in classic mode.
- Consume(i::Token::CONST);
+ Consume(Token::CONST);
switch (language_mode()) {
- case i::CLASSIC_MODE:
+ case CLASSIC_MODE:
break;
- case i::STRICT_MODE: {
- i::Scanner::Location location = scanner_->peek_location();
+ case STRICT_MODE: {
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location, "strict_const", NULL);
*ok = false;
return Statement::Default();
}
- case i::EXTENDED_MODE:
+ case EXTENDED_MODE:
if (var_context != kSourceElement &&
var_context != kForStatement) {
- i::Scanner::Location location = scanner_->peek_location();
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location.beg_pos, location.end_pos,
"unprotected_const", NULL);
*ok = false;
@@ -420,7 +407,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
require_initializer = true;
break;
}
- } else if (peek() == i::Token::LET) {
+ } else if (peek() == Token::LET) {
// ES6 Draft Rev4 section 12.2.1:
//
// LetDeclaration : let LetBindingList ;
@@ -428,16 +415,16 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
if (!is_extended_mode()) {
- i::Scanner::Location location = scanner_->peek_location();
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location.beg_pos, location.end_pos,
"illegal_let", NULL);
*ok = false;
return Statement::Default();
}
- Consume(i::Token::LET);
+ Consume(Token::LET);
if (var_context != kSourceElement &&
var_context != kForStatement) {
- i::Scanner::Location location = scanner_->peek_location();
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location.beg_pos, location.end_pos,
"unprotected_let", NULL);
*ok = false;
@@ -455,22 +442,22 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
int nvars = 0; // the number of variables declared
do {
// Parse variable name.
- if (nvars > 0) Consume(i::Token::COMMA);
+ if (nvars > 0) Consume(Token::COMMA);
Identifier identifier = ParseIdentifier(CHECK_OK);
if (!is_classic_mode() && !identifier.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_var_name",
identifier,
ok);
return Statement::Default();
}
nvars++;
- if (peek() == i::Token::ASSIGN || require_initializer) {
- Expect(i::Token::ASSIGN, CHECK_OK);
+ if (peek() == Token::ASSIGN || require_initializer) {
+ Expect(Token::ASSIGN, CHECK_OK);
ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
if (decl_props != NULL) *decl_props = kHasInitializers;
}
- } while (peek() == i::Token::COMMA);
+ } while (peek() == Token::COMMA);
if (num_decl != NULL) *num_decl = nvars;
return Statement::Default();
@@ -488,8 +475,8 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
ASSERT(is_classic_mode() ||
(!expr.AsIdentifier().IsFutureStrictReserved() &&
!expr.AsIdentifier().IsYield()));
- if (peek() == i::Token::COLON) {
- Consume(i::Token::COLON);
+ if (peek() == Token::COLON) {
+ Consume(Token::COLON);
return ParseStatement(ok);
}
// Preparsing is disabled for extensions (because the extension details
@@ -506,12 +493,12 @@ PreParser::Statement PreParser::ParseIfStatement(bool* ok) {
// IfStatement ::
// 'if' '(' Expression ')' Statement ('else' Statement)?
- Expect(i::Token::IF, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::IF, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(CHECK_OK);
- if (peek() == i::Token::ELSE) {
+ if (peek() == Token::ELSE) {
Next();
ParseStatement(CHECK_OK);
}
@@ -523,12 +510,12 @@ PreParser::Statement PreParser::ParseContinueStatement(bool* ok) {
// ContinueStatement ::
// 'continue' [no line terminator] Identifier? ';'
- Expect(i::Token::CONTINUE, CHECK_OK);
- i::Token::Value tok = peek();
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
+ Expect(Token::CONTINUE, CHECK_OK);
+ Token::Value tok = peek();
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ tok != Token::SEMICOLON &&
+ tok != Token::RBRACE &&
+ tok != Token::EOS) {
ParseIdentifier(CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -540,12 +527,12 @@ PreParser::Statement PreParser::ParseBreakStatement(bool* ok) {
// BreakStatement ::
// 'break' [no line terminator] Identifier? ';'
- Expect(i::Token::BREAK, CHECK_OK);
- i::Token::Value tok = peek();
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
+ Expect(Token::BREAK, CHECK_OK);
+ Token::Value tok = peek();
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ tok != Token::SEMICOLON &&
+ tok != Token::RBRACE &&
+ tok != Token::EOS) {
ParseIdentifier(CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -560,18 +547,18 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
  // Consume the return token. It is necessary to do this before
// reporting any errors on it, because of the way errors are
// reported (underlining).
- Expect(i::Token::RETURN, CHECK_OK);
+ Expect(Token::RETURN, CHECK_OK);
// An ECMAScript program is considered syntactically incorrect if it
// contains a return statement that is not within the body of a
// function. See ECMA-262, section 12.9, page 67.
// This is not handled during preparsing.
- i::Token::Value tok = peek();
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
+ Token::Value tok = peek();
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ tok != Token::SEMICOLON &&
+ tok != Token::RBRACE &&
+ tok != Token::EOS) {
ParseExpression(true, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -582,16 +569,16 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
- Expect(i::Token::WITH, CHECK_OK);
+ Expect(Token::WITH, CHECK_OK);
if (!is_classic_mode()) {
- i::Scanner::Location location = scanner_->location();
+ Scanner::Location location = scanner()->location();
ReportMessageAt(location, "strict_mode_with", NULL);
*ok = false;
return Statement::Default();
}
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
Scope::InsideWith iw(scope_);
ParseStatement(CHECK_OK);
@@ -603,30 +590,30 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
// SwitchStatement ::
// 'switch' '(' Expression ')' '{' CaseClause* '}'
- Expect(i::Token::SWITCH, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::SWITCH, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
- Expect(i::Token::LBRACE, CHECK_OK);
- i::Token::Value token = peek();
- while (token != i::Token::RBRACE) {
- if (token == i::Token::CASE) {
- Expect(i::Token::CASE, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
+ Token::Value token = peek();
+ while (token != Token::RBRACE) {
+ if (token == Token::CASE) {
+ Expect(Token::CASE, CHECK_OK);
ParseExpression(true, CHECK_OK);
} else {
- Expect(i::Token::DEFAULT, CHECK_OK);
+ Expect(Token::DEFAULT, CHECK_OK);
}
- Expect(i::Token::COLON, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
token = peek();
- while (token != i::Token::CASE &&
- token != i::Token::DEFAULT &&
- token != i::Token::RBRACE) {
+ while (token != Token::CASE &&
+ token != Token::DEFAULT &&
+ token != Token::RBRACE) {
ParseStatement(CHECK_OK);
token = peek();
}
}
- Expect(i::Token::RBRACE, ok);
+ Expect(Token::RBRACE, ok);
return Statement::Default();
}
@@ -635,13 +622,13 @@ PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
- Expect(i::Token::DO, CHECK_OK);
+ Expect(Token::DO, CHECK_OK);
ParseStatement(CHECK_OK);
- Expect(i::Token::WHILE, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::WHILE, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, ok);
- if (peek() == i::Token::SEMICOLON) Consume(i::Token::SEMICOLON);
+ Expect(Token::RPAREN, ok);
+ if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
return Statement::Default();
}
@@ -650,20 +637,19 @@ PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
- Expect(i::Token::WHILE, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::WHILE, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(ok);
return Statement::Default();
}
bool PreParser::CheckInOrOf(bool accept_OF) {
- if (peek() == i::Token::IN ||
- (allow_for_of() && accept_OF && peek() == i::Token::IDENTIFIER &&
- scanner_->is_next_contextual_keyword(v8::internal::CStrVector("of")))) {
- Next();
+ if (Check(Token::IN) ||
+ (allow_for_of() && accept_OF &&
+ CheckContextualKeyword(CStrVector("of")))) {
return true;
}
return false;
@@ -674,12 +660,12 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
- Expect(i::Token::FOR, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- if (peek() != i::Token::SEMICOLON) {
- if (peek() == i::Token::VAR || peek() == i::Token::CONST ||
- peek() == i::Token::LET) {
- bool is_let = peek() == i::Token::LET;
+ Expect(Token::FOR, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ if (peek() != Token::SEMICOLON) {
+ if (peek() == Token::VAR || peek() == Token::CONST ||
+ peek() == Token::LET) {
+ bool is_let = peek() == Token::LET;
int decl_count;
VariableDeclarationProperties decl_props = kHasNoInitializers;
ParseVariableDeclarations(
@@ -689,7 +675,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
bool accept_OF = !has_initializers;
if (accept_IN && CheckInOrOf(accept_OF)) {
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(CHECK_OK);
return Statement::Default();
@@ -698,7 +684,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
Expression lhs = ParseExpression(false, CHECK_OK);
if (CheckInOrOf(lhs.IsIdentifier())) {
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(CHECK_OK);
return Statement::Default();
@@ -707,17 +693,17 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
// Parsed initializer at this point.
- Expect(i::Token::SEMICOLON, CHECK_OK);
+ Expect(Token::SEMICOLON, CHECK_OK);
- if (peek() != i::Token::SEMICOLON) {
+ if (peek() != Token::SEMICOLON) {
ParseExpression(true, CHECK_OK);
}
- Expect(i::Token::SEMICOLON, CHECK_OK);
+ Expect(Token::SEMICOLON, CHECK_OK);
- if (peek() != i::Token::RPAREN) {
+ if (peek() != Token::RPAREN) {
ParseExpression(true, CHECK_OK);
}
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(ok);
return Statement::Default();
@@ -728,9 +714,9 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
// ThrowStatement ::
// 'throw' [no line terminator] Expression ';'
- Expect(i::Token::THROW, CHECK_OK);
- if (scanner_->HasAnyLineTerminatorBeforeNext()) {
- i::Scanner::Location pos = scanner_->location();
+ Expect(Token::THROW, CHECK_OK);
+ if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+ Scanner::Location pos = scanner()->location();
ReportMessageAt(pos, "newline_after_throw", NULL);
*ok = false;
return Statement::Default();
@@ -756,30 +742,30 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
// In preparsing, allow any number of catch/finally blocks, including zero
// of both.
- Expect(i::Token::TRY, CHECK_OK);
+ Expect(Token::TRY, CHECK_OK);
ParseBlock(CHECK_OK);
bool catch_or_finally_seen = false;
- if (peek() == i::Token::CATCH) {
- Consume(i::Token::CATCH);
- Expect(i::Token::LPAREN, CHECK_OK);
+ if (peek() == Token::CATCH) {
+ Consume(Token::CATCH);
+ Expect(Token::LPAREN, CHECK_OK);
Identifier id = ParseIdentifier(CHECK_OK);
if (!is_classic_mode() && !id.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_catch_variable",
id,
ok);
return Statement::Default();
}
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
{ Scope::InsideWith iw(scope_);
ParseBlock(CHECK_OK);
}
catch_or_finally_seen = true;
}
- if (peek() == i::Token::FINALLY) {
- Consume(i::Token::FINALLY);
+ if (peek() == Token::FINALLY) {
+ Consume(Token::FINALLY);
ParseBlock(CHECK_OK);
catch_or_finally_seen = true;
}
@@ -797,7 +783,7 @@ PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
// DebuggerStatement ::
// 'debugger' ';'
- Expect(i::Token::DEBUGGER, CHECK_OK);
+ Expect(Token::DEBUGGER, CHECK_OK);
ExpectSemicolon(ok);
return Statement::Default();
}
@@ -818,8 +804,8 @@ PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
// Expression ',' AssignmentExpression
Expression result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == i::Token::COMMA) {
- Expect(i::Token::COMMA, CHECK_OK);
+ while (peek() == Token::COMMA) {
+ Expect(Token::COMMA, CHECK_OK);
ParseAssignmentExpression(accept_IN, CHECK_OK);
result = Expression::Default();
}
@@ -835,14 +821,14 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
// YieldExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
- if (scope_->is_generator() && peek() == i::Token::YIELD) {
+ if (scope_->is_generator() && peek() == Token::YIELD) {
return ParseYieldExpression(ok);
}
- i::Scanner::Location before = scanner_->peek_location();
+ Scanner::Location before = scanner()->peek_location();
Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK);
- if (!i::Token::IsAssignmentOp(peek())) {
+ if (!Token::IsAssignmentOp(peek())) {
// Parsed conditional expression only (no assignment).
return expression;
}
@@ -850,17 +836,17 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
if (!is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
- i::Scanner::Location after = scanner_->location();
+ Scanner::Location after = scanner()->location();
ReportMessageAt(before.beg_pos, after.end_pos,
"strict_lhs_assignment", NULL);
*ok = false;
return Expression::Default();
}
- i::Token::Value op = Next(); // Get assignment operator.
+ Token::Value op = Next(); // Get assignment operator.
ParseAssignmentExpression(accept_IN, CHECK_OK);
- if ((op == i::Token::ASSIGN) && expression.IsThisProperty()) {
+ if ((op == Token::ASSIGN) && expression.IsThisProperty()) {
scope_->AddProperty();
}
@@ -872,8 +858,8 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
PreParser::Expression PreParser::ParseYieldExpression(bool* ok) {
// YieldExpression ::
// 'yield' '*'? AssignmentExpression
- Consume(i::Token::YIELD);
- Check(i::Token::MUL);
+ Consume(Token::YIELD);
+ Check(Token::MUL);
ParseAssignmentExpression(false, CHECK_OK);
@@ -890,26 +876,18 @@ PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN,
// We start using the binary expression parser for prec >= 4 only!
Expression expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != i::Token::CONDITIONAL) return expression;
- Consume(i::Token::CONDITIONAL);
+ if (peek() != Token::CONDITIONAL) return expression;
+ Consume(Token::CONDITIONAL);
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
ParseAssignmentExpression(true, CHECK_OK);
- Expect(i::Token::COLON, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
ParseAssignmentExpression(accept_IN, CHECK_OK);
return Expression::Default();
}
-int PreParser::Precedence(i::Token::Value tok, bool accept_IN) {
- if (tok == i::Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
-
- return i::Token::Precedence(tok);
-}
-
-
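
The removed Precedence() helper encoded a standard precedence-climbing trick: report precedence 0 for 'in' when it is not accepted (inside a for-statement initializer), so the binary-expression loop refuses to consume the token and leaves it for ParseForStatement. Schematically (a simplified sketch of the loop's shape, not the exact body below):

// With accept_IN == false, Precedence(Token::IN, accept_IN) == 0, so the
// loop condition fails on 'in' and the token survives for the caller.
Expression ParseBinarySketch(int prec, bool accept_IN, bool* ok) {
  Expression result = ParseUnaryExpression(ok);
  while (*ok && Precedence(peek(), accept_IN) >= prec) {
    Token::Value op = Next();
    ParseBinarySketch(Precedence(op, accept_IN) + 1, accept_IN, ok);
    result = Expression::Default();
  }
  return result;
}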
// Precedence >= 4
PreParser::Expression PreParser::ParseBinaryExpression(int prec,
bool accept_IN,
@@ -940,19 +918,19 @@ PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
// '~' UnaryExpression
// '!' UnaryExpression
- i::Token::Value op = peek();
- if (i::Token::IsUnaryOp(op)) {
+ Token::Value op = peek();
+ if (Token::IsUnaryOp(op)) {
op = Next();
ParseUnaryExpression(ok);
return Expression::Default();
- } else if (i::Token::IsCountOp(op)) {
+ } else if (Token::IsCountOp(op)) {
op = Next();
- i::Scanner::Location before = scanner_->peek_location();
+ Scanner::Location before = scanner()->peek_location();
Expression expression = ParseUnaryExpression(CHECK_OK);
if (!is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
- i::Scanner::Location after = scanner_->location();
+ Scanner::Location after = scanner()->location();
ReportMessageAt(before.beg_pos, after.end_pos,
"strict_lhs_prefix", NULL);
*ok = false;
@@ -968,14 +946,14 @@ PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
// PostfixExpression ::
// LeftHandSideExpression ('++' | '--')?
- i::Scanner::Location before = scanner_->peek_location();
+ Scanner::Location before = scanner()->peek_location();
Expression expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- i::Token::IsCountOp(peek())) {
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ Token::IsCountOp(peek())) {
if (!is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
- i::Scanner::Location after = scanner_->location();
+ Scanner::Location after = scanner()->location();
ReportMessageAt(before.beg_pos, after.end_pos,
"strict_lhs_postfix", NULL);
*ok = false;
@@ -993,7 +971,7 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
// (NewExpression | MemberExpression) ...
Expression result = Expression::Default();
- if (peek() == i::Token::NEW) {
+ if (peek() == Token::NEW) {
result = ParseNewExpression(CHECK_OK);
} else {
result = ParseMemberExpression(CHECK_OK);
@@ -1001,10 +979,10 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
while (true) {
switch (peek()) {
- case i::Token::LBRACK: {
- Consume(i::Token::LBRACK);
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RBRACK, CHECK_OK);
+ Expect(Token::RBRACK, CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
} else {
@@ -1013,14 +991,14 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
break;
}
- case i::Token::LPAREN: {
+ case Token::LPAREN: {
ParseArguments(CHECK_OK);
result = Expression::Default();
break;
}
- case i::Token::PERIOD: {
- Consume(i::Token::PERIOD);
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
ParseIdentifierName(CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
@@ -1051,9 +1029,9 @@ PreParser::Expression PreParser::ParseNewExpression(bool* ok) {
// lists as long as it has 'new' prefixes left
unsigned new_count = 0;
do {
- Consume(i::Token::NEW);
+ Consume(Token::NEW);
new_count++;
- } while (peek() == i::Token::NEW);
+ } while (peek() == Token::NEW);
return ParseMemberWithNewPrefixesExpression(new_count, ok);
}
@@ -1072,17 +1050,17 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
// Parse the initial primary or function expression.
Expression result = Expression::Default();
- if (peek() == i::Token::FUNCTION) {
- Consume(i::Token::FUNCTION);
+ if (peek() == Token::FUNCTION) {
+ Consume(Token::FUNCTION);
- bool is_generator = allow_generators_ && Check(i::Token::MUL);
+ bool is_generator = allow_generators() && Check(Token::MUL);
Identifier identifier = Identifier::Default();
if (peek_any_identifier()) {
identifier = ParseIdentifier(CHECK_OK);
}
result = ParseFunctionLiteral(is_generator, CHECK_OK);
if (result.IsStrictFunction() && !identifier.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_function_name",
identifier,
ok);
@@ -1094,10 +1072,10 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
while (true) {
switch (peek()) {
- case i::Token::LBRACK: {
- Consume(i::Token::LBRACK);
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RBRACK, CHECK_OK);
+ Expect(Token::RBRACK, CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
} else {
@@ -1105,8 +1083,8 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
}
break;
}
- case i::Token::PERIOD: {
- Consume(i::Token::PERIOD);
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
ParseIdentifierName(CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
@@ -1115,7 +1093,7 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
}
break;
}
- case i::Token::LPAREN: {
+ case Token::LPAREN: {
if (new_count == 0) return result;
// Consume one of the new prefixes (already parsed).
ParseArguments(CHECK_OK);
@@ -1146,59 +1124,59 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
Expression result = Expression::Default();
switch (peek()) {
- case i::Token::THIS: {
+ case Token::THIS: {
Next();
result = Expression::This();
break;
}
- case i::Token::FUTURE_RESERVED_WORD:
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
- case i::Token::YIELD:
- case i::Token::IDENTIFIER: {
+ case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::YIELD:
+ case Token::IDENTIFIER: {
Identifier id = ParseIdentifier(CHECK_OK);
result = Expression::FromIdentifier(id);
break;
}
- case i::Token::NULL_LITERAL:
- case i::Token::TRUE_LITERAL:
- case i::Token::FALSE_LITERAL:
- case i::Token::NUMBER: {
+ case Token::NULL_LITERAL:
+ case Token::TRUE_LITERAL:
+ case Token::FALSE_LITERAL:
+ case Token::NUMBER: {
Next();
break;
}
- case i::Token::STRING: {
+ case Token::STRING: {
Next();
result = GetStringSymbol();
break;
}
- case i::Token::ASSIGN_DIV:
+ case Token::ASSIGN_DIV:
result = ParseRegExpLiteral(true, CHECK_OK);
break;
- case i::Token::DIV:
+ case Token::DIV:
result = ParseRegExpLiteral(false, CHECK_OK);
break;
- case i::Token::LBRACK:
+ case Token::LBRACK:
result = ParseArrayLiteral(CHECK_OK);
break;
- case i::Token::LBRACE:
+ case Token::LBRACE:
result = ParseObjectLiteral(CHECK_OK);
break;
- case i::Token::LPAREN:
- Consume(i::Token::LPAREN);
- parenthesized_function_ = (peek() == i::Token::FUNCTION);
+ case Token::LPAREN:
+ Consume(Token::LPAREN);
+ parenthesized_function_ = (peek() == Token::FUNCTION);
result = ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
result = result.Parenthesize();
break;
- case i::Token::MOD:
+ case Token::MOD:
result = ParseV8Intrinsic(CHECK_OK);
break;
@@ -1216,54 +1194,21 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
- Expect(i::Token::LBRACK, CHECK_OK);
- while (peek() != i::Token::RBRACK) {
- if (peek() != i::Token::COMMA) {
+ Expect(Token::LBRACK, CHECK_OK);
+ while (peek() != Token::RBRACK) {
+ if (peek() != Token::COMMA) {
ParseAssignmentExpression(true, CHECK_OK);
}
- if (peek() != i::Token::RBRACK) {
- Expect(i::Token::COMMA, CHECK_OK);
+ if (peek() != Token::RBRACK) {
+ Expect(Token::COMMA, CHECK_OK);
}
}
- Expect(i::Token::RBRACK, CHECK_OK);
+ Expect(Token::RBRACK, CHECK_OK);
scope_->NextMaterializedLiteralIndex();
return Expression::Default();
}
-void PreParser::CheckDuplicate(DuplicateFinder* finder,
- i::Token::Value property,
- int type,
- bool* ok) {
- int old_type;
- if (property == i::Token::NUMBER) {
- old_type = finder->AddNumber(scanner_->literal_ascii_string(), type);
- } else if (scanner_->is_literal_ascii()) {
- old_type = finder->AddAsciiSymbol(scanner_->literal_ascii_string(),
- type);
- } else {
- old_type = finder->AddUtf16Symbol(scanner_->literal_utf16_string(), type);
- }
- if (HasConflict(old_type, type)) {
- if (IsDataDataConflict(old_type, type)) {
- // Both are data properties.
- if (is_classic_mode()) return;
- ReportMessageAt(scanner_->location(),
- "strict_duplicate_property", NULL);
- } else if (IsDataAccessorConflict(old_type, type)) {
- // Both a data and an accessor property with the same name.
- ReportMessageAt(scanner_->location(),
- "accessor_data_property", NULL);
- } else {
- ASSERT(IsAccessorAccessorConflict(old_type, type));
- // Both accessors of the same type.
- ReportMessageAt(scanner_->location(),
- "accessor_get_set", NULL);
- }
- *ok = false;
- }
-}
-
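
The duplicate-property rules the removed code enforced (and which ObjectLiteralChecker now enforces centrally) are: data/data duplicates are an error only in strict mode, data/accessor clashes are always errors, and so are repeated getters or repeated setters for one name, while one getter plus one setter is fine. A compact sketch of that decision with hypothetical bit flags (the real checker uses its own encoding):

enum PropertyBits { kData = 1, kGetter = 2, kSetter = 4 };

// Returns true if adding 'new_kind' to a name already seen with the kinds
// in 'old_kinds' must be rejected under the given mode.
bool IsDuplicateError(int old_kinds, int new_kind, bool strict_mode) {
  if (old_kinds == 0) return false;               // First occurrence.
  bool both_data = (old_kinds & kData) && (new_kind == kData);
  if (both_data) return strict_mode;              // data/data: strict only.
  if ((old_kinds & new_kind) != 0) return true;   // get/get or set/set.
  if ((old_kinds & kData) || (new_kind == kData)) return true;  // data vs accessor.
  return false;                                   // getter + setter is allowed.
}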
PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
// ObjectLiteral ::
@@ -1272,25 +1217,26 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
// | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
// )*[','] '}'
- Expect(i::Token::LBRACE, CHECK_OK);
- DuplicateFinder duplicate_finder(scanner_->unicode_cache());
- while (peek() != i::Token::RBRACE) {
- i::Token::Value next = peek();
+ ObjectLiteralChecker checker(this, language_mode());
+
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
+ Token::Value next = peek();
switch (next) {
- case i::Token::IDENTIFIER:
- case i::Token::FUTURE_RESERVED_WORD:
- case i::Token::FUTURE_STRICT_RESERVED_WORD: {
+ case Token::IDENTIFIER:
+ case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD: {
bool is_getter = false;
bool is_setter = false;
ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if ((is_getter || is_setter) && peek() != i::Token::COLON) {
- i::Token::Value name = Next();
- bool is_keyword = i::Token::IsKeyword(name);
- if (name != i::Token::IDENTIFIER &&
- name != i::Token::FUTURE_RESERVED_WORD &&
- name != i::Token::FUTURE_STRICT_RESERVED_WORD &&
- name != i::Token::NUMBER &&
- name != i::Token::STRING &&
+ if ((is_getter || is_setter) && peek() != Token::COLON) {
+ Token::Value name = Next();
+ bool is_keyword = Token::IsKeyword(name);
+ if (name != Token::IDENTIFIER &&
+ name != Token::FUTURE_RESERVED_WORD &&
+ name != Token::FUTURE_STRICT_RESERVED_WORD &&
+ name != Token::NUMBER &&
+ name != Token::STRING &&
!is_keyword) {
*ok = false;
return Expression::Default();
@@ -1298,30 +1244,30 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
if (!is_keyword) {
LogSymbol();
}
- PropertyType type = is_getter ? kGetterProperty : kSetterProperty;
- CheckDuplicate(&duplicate_finder, name, type, CHECK_OK);
+ PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
+ checker.CheckProperty(name, type, CHECK_OK);
ParseFunctionLiteral(false, CHECK_OK);
- if (peek() != i::Token::RBRACE) {
- Expect(i::Token::COMMA, CHECK_OK);
+ if (peek() != Token::RBRACE) {
+ Expect(Token::COMMA, CHECK_OK);
}
continue; // restart the while
}
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
break;
}
- case i::Token::STRING:
+ case Token::STRING:
Consume(next);
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
GetStringSymbol();
break;
- case i::Token::NUMBER:
+ case Token::NUMBER:
Consume(next);
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
break;
default:
- if (i::Token::IsKeyword(next)) {
+ if (Token::IsKeyword(next)) {
Consume(next);
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
} else {
// Unexpected token.
*ok = false;
@@ -1329,13 +1275,13 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
}
}
- Expect(i::Token::COLON, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
ParseAssignmentExpression(true, CHECK_OK);
// TODO(1240767): Consider allowing trailing comma.
- if (peek() != i::Token::RBRACE) Expect(i::Token::COMMA, CHECK_OK);
+ if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
}
- Expect(i::Token::RBRACE, CHECK_OK);
+ Expect(Token::RBRACE, CHECK_OK);
scope_->NextMaterializedLiteralIndex();
return Expression::Default();
@@ -1344,18 +1290,18 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
bool* ok) {
- if (!scanner_->ScanRegExpPattern(seen_equal)) {
+ if (!scanner()->ScanRegExpPattern(seen_equal)) {
Next();
- ReportMessageAt(scanner_->location(), "unterminated_regexp", NULL);
+ ReportMessageAt(scanner()->location(), "unterminated_regexp", NULL);
*ok = false;
return Expression::Default();
}
scope_->NextMaterializedLiteralIndex();
- if (!scanner_->ScanRegExpFlags()) {
+ if (!scanner()->ScanRegExpFlags()) {
Next();
- ReportMessageAt(scanner_->location(), "invalid_regexp_flags", NULL);
+ ReportMessageAt(scanner()->location(), "invalid_regexp_flags", NULL);
*ok = false;
return Expression::Default();
}
@@ -1368,21 +1314,21 @@ PreParser::Arguments PreParser::ParseArguments(bool* ok) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
- Expect(i::Token::LPAREN, ok);
+ Expect(Token::LPAREN, ok);
if (!*ok) return -1;
- bool done = (peek() == i::Token::RPAREN);
+ bool done = (peek() == Token::RPAREN);
int argc = 0;
while (!done) {
ParseAssignmentExpression(true, ok);
if (!*ok) return -1;
argc++;
- done = (peek() == i::Token::RPAREN);
+ done = (peek() == Token::RPAREN);
if (!done) {
- Expect(i::Token::COMMA, ok);
+ Expect(Token::COMMA, ok);
if (!*ok) return -1;
}
}
- Expect(i::Token::RPAREN, ok);
+ Expect(Token::RPAREN, ok);
return argc;
}
@@ -1399,57 +1345,57 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator,
function_scope.set_is_generator(is_generator);
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
- Expect(i::Token::LPAREN, CHECK_OK);
- int start_position = scanner_->location().beg_pos;
- bool done = (peek() == i::Token::RPAREN);
- DuplicateFinder duplicate_finder(scanner_->unicode_cache());
+ Expect(Token::LPAREN, CHECK_OK);
+ int start_position = position();
+ bool done = (peek() == Token::RPAREN);
+ DuplicateFinder duplicate_finder(scanner()->unicode_cache());
while (!done) {
Identifier id = ParseIdentifier(CHECK_OK);
if (!id.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_param_name",
id,
CHECK_OK);
}
int prev_value;
- if (scanner_->is_literal_ascii()) {
+ if (scanner()->is_literal_ascii()) {
prev_value =
- duplicate_finder.AddAsciiSymbol(scanner_->literal_ascii_string(), 1);
+ duplicate_finder.AddAsciiSymbol(scanner()->literal_ascii_string(), 1);
} else {
prev_value =
- duplicate_finder.AddUtf16Symbol(scanner_->literal_utf16_string(), 1);
+ duplicate_finder.AddUtf16Symbol(scanner()->literal_utf16_string(), 1);
}
if (prev_value != 0) {
- SetStrictModeViolation(scanner_->location(),
+ SetStrictModeViolation(scanner()->location(),
"strict_param_dupe",
CHECK_OK);
}
- done = (peek() == i::Token::RPAREN);
+ done = (peek() == Token::RPAREN);
if (!done) {
- Expect(i::Token::COMMA, CHECK_OK);
+ Expect(Token::COMMA, CHECK_OK);
}
}
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
// Determine if the function will be lazily compiled.
// Currently only happens to top-level functions.
// Optimistically assume that all top-level functions are lazily compiled.
bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
- !inside_with && allow_lazy_ &&
+ !inside_with && allow_lazy() &&
!parenthesized_function_);
parenthesized_function_ = false;
- Expect(i::Token::LBRACE, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
if (is_lazily_compiled) {
ParseLazyFunctionLiteralBody(CHECK_OK);
} else {
- ParseSourceElements(i::Token::RBRACE, ok);
+ ParseSourceElements(Token::RBRACE, ok);
}
- Expect(i::Token::RBRACE, CHECK_OK);
+ Expect(Token::RBRACE, CHECK_OK);
if (!is_classic_mode()) {
- int end_position = scanner_->location().end_pos;
+ int end_position = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_position, CHECK_OK);
CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
return Expression::StrictFunction();
@@ -1460,15 +1406,15 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator,
void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
- int body_start = scanner_->location().beg_pos;
+ int body_start = position();
log_->PauseRecording();
- ParseSourceElements(i::Token::RBRACE, ok);
+ ParseSourceElements(Token::RBRACE, ok);
log_->ResumeRecording();
if (!*ok) return;
// Position right after terminal '}'.
- ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
- int body_end = scanner_->peek_location().end_pos;
+ ASSERT_EQ(Token::RBRACE, scanner()->peek());
+ int body_end = scanner()->peek_location().end_pos;
log_->LogFunction(body_start, body_end,
scope_->materialized_literal_count(),
scope_->expected_properties(),
@@ -1479,8 +1425,8 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
// CallRuntime ::
// '%' Identifier Arguments
- Expect(i::Token::MOD, CHECK_OK);
- if (!allow_natives_syntax_) {
+ Expect(Token::MOD, CHECK_OK);
+ if (!allow_natives_syntax()) {
*ok = false;
return Expression::Default();
}
@@ -1493,29 +1439,12 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
#undef CHECK_OK
-void PreParser::ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- i::Token::Value tok = peek();
- if (tok == i::Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner_->HasAnyLineTerminatorBeforeNext() ||
- tok == i::Token::RBRACE ||
- tok == i::Token::EOS) {
- return;
- }
- Expect(i::Token::SEMICOLON, ok);
-}
-
-
void PreParser::LogSymbol() {
- int identifier_pos = scanner_->location().beg_pos;
- if (scanner_->is_literal_ascii()) {
- log_->LogAsciiSymbol(identifier_pos, scanner_->literal_ascii_string());
+ int identifier_pos = position();
+ if (scanner()->is_literal_ascii()) {
+ log_->LogAsciiSymbol(identifier_pos, scanner()->literal_ascii_string());
} else {
- log_->LogUtf16Symbol(identifier_pos, scanner_->literal_utf16_string());
+ log_->LogUtf16Symbol(identifier_pos, scanner()->literal_utf16_string());
}
}
@@ -1524,10 +1453,10 @@ PreParser::Expression PreParser::GetStringSymbol() {
const int kUseStrictLength = 10;
const char* kUseStrictChars = "use strict";
LogSymbol();
- if (scanner_->is_literal_ascii() &&
- scanner_->literal_length() == kUseStrictLength &&
- !scanner_->literal_contains_escapes() &&
- !strncmp(scanner_->literal_ascii_string().start(), kUseStrictChars,
+ if (scanner()->is_literal_ascii() &&
+ scanner()->literal_length() == kUseStrictLength &&
+ !scanner()->literal_contains_escapes() &&
+ !strncmp(scanner()->literal_ascii_string().start(), kUseStrictChars,
kUseStrictLength)) {
return Expression::UseStrictStringLiteral();
}
@@ -1537,22 +1466,22 @@ PreParser::Expression PreParser::GetStringSymbol() {
PreParser::Identifier PreParser::GetIdentifierSymbol() {
LogSymbol();
- if (scanner_->current_token() == i::Token::FUTURE_RESERVED_WORD) {
+ if (scanner()->current_token() == Token::FUTURE_RESERVED_WORD) {
return Identifier::FutureReserved();
- } else if (scanner_->current_token() ==
- i::Token::FUTURE_STRICT_RESERVED_WORD) {
+ } else if (scanner()->current_token() ==
+ Token::FUTURE_STRICT_RESERVED_WORD) {
return Identifier::FutureStrictReserved();
- } else if (scanner_->current_token() == i::Token::YIELD) {
+ } else if (scanner()->current_token() == Token::YIELD) {
return Identifier::Yield();
}
- if (scanner_->is_literal_ascii()) {
+ if (scanner()->is_literal_ascii()) {
// Detect strict-mode poison words.
- if (scanner_->literal_length() == 4 &&
- !strncmp(scanner_->literal_ascii_string().start(), "eval", 4)) {
+ if (scanner()->literal_length() == 4 &&
+ !strncmp(scanner()->literal_ascii_string().start(), "eval", 4)) {
return Identifier::Eval();
}
- if (scanner_->literal_length() == 9 &&
- !strncmp(scanner_->literal_ascii_string().start(), "arguments", 9)) {
+ if (scanner()->literal_length() == 9 &&
+ !strncmp(scanner()->literal_ascii_string().start(), "arguments", 9)) {
return Identifier::Arguments();
}
}
@@ -1561,32 +1490,32 @@ PreParser::Identifier PreParser::GetIdentifierSymbol() {
PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
- i::Token::Value next = Next();
+ Token::Value next = Next();
switch (next) {
- case i::Token::FUTURE_RESERVED_WORD: {
- i::Scanner::Location location = scanner_->location();
+ case Token::FUTURE_RESERVED_WORD: {
+ Scanner::Location location = scanner()->location();
ReportMessageAt(location.beg_pos, location.end_pos,
"reserved_word", NULL);
*ok = false;
return GetIdentifierSymbol();
}
- case i::Token::YIELD:
+ case Token::YIELD:
if (scope_->is_generator()) {
// 'yield' in a generator is only valid as part of a YieldExpression.
- ReportMessageAt(scanner_->location(), "unexpected_token", "yield");
+ ReportMessageAt(scanner()->location(), "unexpected_token", "yield");
*ok = false;
return Identifier::Yield();
}
// FALLTHROUGH
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
if (!is_classic_mode()) {
- i::Scanner::Location location = scanner_->location();
+ Scanner::Location location = scanner()->location();
ReportMessageAt(location.beg_pos, location.end_pos,
"strict_reserved_word", NULL);
*ok = false;
}
// FALLTHROUGH
- case i::Token::IDENTIFIER:
+ case Token::IDENTIFIER:
return GetIdentifierSymbol();
default:
*ok = false;
@@ -1595,7 +1524,7 @@ PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
}
-void PreParser::SetStrictModeViolation(i::Scanner::Location location,
+void PreParser::SetStrictModeViolation(Scanner::Location location,
const char* type,
bool* ok) {
if (!is_classic_mode()) {
@@ -1619,7 +1548,7 @@ void PreParser::SetStrictModeViolation(i::Scanner::Location location,
void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
int end_pos,
bool* ok) {
- i::Scanner::Location location = strict_mode_violation_location_;
+ Scanner::Location location = strict_mode_violation_location_;
if (location.IsValid() &&
location.beg_pos > beg_pos && location.end_pos < end_pos) {
ReportMessageAt(location, strict_mode_violation_type_, NULL);
@@ -1628,7 +1557,7 @@ void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
}
-void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location,
+void PreParser::StrictModeIdentifierViolation(Scanner::Location location,
const char* eval_args_type,
Identifier identifier,
bool* ok) {
@@ -1649,17 +1578,16 @@ void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location,
PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
- i::Token::Value next = Next();
- if (i::Token::IsKeyword(next)) {
- int pos = scanner_->location().beg_pos;
- const char* keyword = i::Token::String(next);
- log_->LogAsciiSymbol(pos, i::Vector<const char>(keyword,
- i::StrLength(keyword)));
+ Token::Value next = Next();
+ if (Token::IsKeyword(next)) {
+ int pos = position();
+ const char* keyword = Token::String(next);
+ log_->LogAsciiSymbol(pos, Vector<const char>(keyword, StrLength(keyword)));
return Identifier::Default();
}
- if (next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD ||
- next == i::Token::FUTURE_STRICT_RESERVED_WORD) {
+ if (next == Token::IDENTIFIER ||
+ next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD) {
return GetIdentifierSymbol();
}
*ok = false;
@@ -1676,9 +1604,9 @@ PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
bool* ok) {
Identifier result = ParseIdentifierName(ok);
if (!*ok) return Identifier::Default();
- if (scanner_->is_literal_ascii() &&
- scanner_->literal_length() == 3) {
- const char* token = scanner_->literal_ascii_string().start();
+ if (scanner()->is_literal_ascii() &&
+ scanner()->literal_length() == 3) {
+ const char* token = scanner()->literal_ascii_string().start();
*is_get = strncmp(token, "get", 3) == 0;
*is_set = !*is_get && strncmp(token, "set", 3) == 0;
}
@@ -1686,147 +1614,36 @@ PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
}
-bool PreParser::peek_any_identifier() {
- i::Token::Value next = peek();
- return next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD ||
- next == i::Token::FUTURE_STRICT_RESERVED_WORD ||
- next == i::Token::YIELD;
-}
-
-
-int DuplicateFinder::AddAsciiSymbol(i::Vector<const char> key, int value) {
- return AddSymbol(i::Vector<const byte>::cast(key), true, value);
-}
-
-
-int DuplicateFinder::AddUtf16Symbol(i::Vector<const uint16_t> key, int value) {
- return AddSymbol(i::Vector<const byte>::cast(key), false, value);
-}
-
-int DuplicateFinder::AddSymbol(i::Vector<const byte> key,
- bool is_ascii,
- int value) {
- uint32_t hash = Hash(key, is_ascii);
- byte* encoding = BackupKey(key, is_ascii);
- i::HashMap::Entry* entry = map_.Lookup(encoding, hash, true);
- int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- entry->value =
- reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
- return old_value;
-}
-
-
-int DuplicateFinder::AddNumber(i::Vector<const char> key, int value) {
- ASSERT(key.length() > 0);
- // Quick check for already being in canonical form.
- if (IsNumberCanonical(key)) {
- return AddAsciiSymbol(key, value);
- }
-
- int flags = i::ALLOW_HEX | i::ALLOW_OCTAL | i::ALLOW_IMPLICIT_OCTAL |
- i::ALLOW_BINARY;
- double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
- int length;
- const char* string;
- if (!std::isfinite(double_value)) {
- string = "Infinity";
- length = 8; // strlen("Infinity");
- } else {
- string = DoubleToCString(double_value,
- i::Vector<char>(number_buffer_, kBufferSize));
- length = i::StrLength(string);
- }
- return AddSymbol(i::Vector<const byte>(reinterpret_cast<const byte*>(string),
- length), true, value);
-}
-
-
-bool DuplicateFinder::IsNumberCanonical(i::Vector<const char> number) {
- // Test for a safe approximation of number literals that are already
- // in canonical form: max 15 digits, no leading zeroes, except an
- // integer part that is a single zero, and no trailing zeros below
- // the decimal point.
- int pos = 0;
- int length = number.length();
- if (number.length() > 15) return false;
- if (number[pos] == '0') {
- pos++;
+void PreParser::ObjectLiteralChecker::CheckProperty(Token::Value property,
+ PropertyKind type,
+ bool* ok) {
+ int old;
+ if (property == Token::NUMBER) {
+ old = finder_.AddNumber(scanner()->literal_ascii_string(), type);
+ } else if (scanner()->is_literal_ascii()) {
+ old = finder_.AddAsciiSymbol(scanner()->literal_ascii_string(), type);
} else {
- while (pos < length &&
- static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++;
- }
- if (length == pos) return true;
- if (number[pos] != '.') return false;
- pos++;
- bool invalid_last_digit = true;
- while (pos < length) {
- byte digit = number[pos] - '0';
- if (digit > '9' - '0') return false;
- invalid_last_digit = (digit == 0);
- pos++;
+ old = finder_.AddUtf16Symbol(scanner()->literal_utf16_string(), type);
}
- return !invalid_last_digit;
-}
-
-
-uint32_t DuplicateFinder::Hash(i::Vector<const byte> key, bool is_ascii) {
- // Primitive hash function, almost identical to the one used
- // for strings (except that it's seeded by the length and ASCII-ness).
- int length = key.length();
- uint32_t hash = (length << 1) | (is_ascii ? 1 : 0) ;
- for (int i = 0; i < length; i++) {
- uint32_t c = key[i];
- hash = (hash + c) * 1025;
- hash ^= (hash >> 6);
- }
- return hash;
-}
-
-
-bool DuplicateFinder::Match(void* first, void* second) {
- // Decode lengths.
- // Length + ASCII-bit is encoded as base 128, most significant heptet first,
- // with a 8th bit being non-zero while there are more heptets.
- // The value encodes the number of bytes following, and whether the original
- // was ASCII.
- byte* s1 = reinterpret_cast<byte*>(first);
- byte* s2 = reinterpret_cast<byte*>(second);
- uint32_t length_ascii_field = 0;
- byte c1;
- do {
- c1 = *s1;
- if (c1 != *s2) return false;
- length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f);
- s1++;
- s2++;
- } while ((c1 & 0x80) != 0);
- int length = static_cast<int>(length_ascii_field >> 1);
- return memcmp(s1, s2, length) == 0;
-}
-
-
-byte* DuplicateFinder::BackupKey(i::Vector<const byte> bytes,
- bool is_ascii) {
- uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0);
- backing_store_.StartSequence();
- // Emit ascii_length as base-128 encoded number, with the 7th bit set
- // on the byte of every heptet except the last, least significant, one.
- if (ascii_length >= (1 << 7)) {
- if (ascii_length >= (1 << 14)) {
- if (ascii_length >= (1 << 21)) {
- if (ascii_length >= (1 << 28)) {
- backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80));
- }
- backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u));
- }
- backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u));
+ PropertyKind old_type = static_cast<PropertyKind>(old);
+ if (HasConflict(old_type, type)) {
+ if (IsDataDataConflict(old_type, type)) {
+ // Both are data properties.
+ if (language_mode_ == CLASSIC_MODE) return;
+ parser()->ReportMessageAt(scanner()->location(),
+ "strict_duplicate_property");
+ } else if (IsDataAccessorConflict(old_type, type)) {
+ // Both a data and an accessor property with the same name.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_data_property");
+ } else {
+ ASSERT(IsAccessorAccessorConflict(old_type, type));
+ // Both accessors of the same type.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_get_set");
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u));
+ *ok = false;
}
- backing_store_.Add(static_cast<byte>(ascii_length & 0x7f));
-
- backing_store_.AddBlock(bytes);
- return backing_store_.EndSequence().start();
}
-} } // v8::preparser
+
+} } // v8::internal
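
The DuplicateFinder code removed above stores every backed-up key behind a small varint header: BackupKey packs (length << 1) | ascii_bit base-128, most significant heptet first, with the high bit set on every heptet except the last, and Match() re-reads that header before comparing the raw key bytes. A standalone sketch of the same header encoding, assuming nothing beyond what the removed code shows (helper names here are illustrative, not V8's):

    #include <cstdint>
    #include <vector>

    typedef uint8_t byte;

    // Emit (length << 1) | ascii_bit base-128, most significant heptet
    // first, high bit set on every heptet except the last -- the layout
    // BackupKey writes into the backing store.
    static void EncodeHeader(std::vector<byte>* out,
                             uint32_t length, bool is_ascii) {
      uint32_t value = (length << 1) | (is_ascii ? 1u : 0u);
      int shift = 28;
      while (shift > 0 && (value >> shift) == 0) shift -= 7;
      for (; shift > 0; shift -= 7) {
        out->push_back(static_cast<byte>(((value >> shift) & 0x7f) | 0x80));
      }
      out->push_back(static_cast<byte>(value & 0x7f));
    }

    // Accumulate heptets until the continuation bit clears, exactly the
    // loop Match() runs before its memcmp of the key bytes.
    static uint32_t DecodeHeader(const byte* p, int* header_size) {
      uint32_t value = 0;
      int n = 0;
      byte c;
      do {
        c = p[n++];
        value = (value << 7) | (c & 0x7f);
      } while ((c & 0x80) != 0);
      *header_size = n;
      return value;  // still (length << 1) | ascii_bit
    }

    int main() {
      std::vector<byte> buf;
      EncodeHeader(&buf, 300, true);  // 601 needs two heptets: 0x84 0x59
      int header_size = 0;
      uint32_t decoded = DecodeHeader(buf.data(), &header_size);
      return (decoded >> 1) == 300 && (decoded & 1) == 1 ? 0 : 1;
    }

Round-tripping a 300-byte ASCII key yields the two-heptet header 0x84 0x59, which decodes back to (300 << 1) | 1.
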
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index 9358d6bd18..e99b4b0a18 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -33,14 +33,178 @@
#include "scanner.h"
namespace v8 {
-
namespace internal {
-class UnicodeCache;
-}
-namespace preparser {
+// Common base class shared between parser and pre-parser.
+class ParserBase {
+ public:
+ ParserBase(Scanner* scanner, uintptr_t stack_limit)
+ : scanner_(scanner),
+ stack_limit_(stack_limit),
+ stack_overflow_(false),
+ allow_lazy_(false),
+ allow_natives_syntax_(false),
+ allow_generators_(false),
+ allow_for_of_(false) { }
+ // TODO(mstarzinger): Only virtual until message reporting has been unified.
+ virtual ~ParserBase() { }
+
+ // Getters that indicate whether certain syntactical constructs are
+ // allowed to be parsed by this instance of the parser.
+ bool allow_lazy() const { return allow_lazy_; }
+ bool allow_natives_syntax() const { return allow_natives_syntax_; }
+ bool allow_generators() const { return allow_generators_; }
+ bool allow_for_of() const { return allow_for_of_; }
+ bool allow_modules() const { return scanner()->HarmonyModules(); }
+ bool allow_harmony_scoping() const { return scanner()->HarmonyScoping(); }
+ bool allow_harmony_numeric_literals() const {
+ return scanner()->HarmonyNumericLiterals();
+ }
+
+ // Setters that determine whether certain syntactical constructs are
+ // allowed to be parsed by this instance of the parser.
+ void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
+ void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
+ void set_allow_generators(bool allow) { allow_generators_ = allow; }
+ void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
+ void set_allow_modules(bool allow) { scanner()->SetHarmonyModules(allow); }
+ void set_allow_harmony_scoping(bool allow) {
+ scanner()->SetHarmonyScoping(allow);
+ }
+ void set_allow_harmony_numeric_literals(bool allow) {
+ scanner()->SetHarmonyNumericLiterals(allow);
+ }
+
+ protected:
+ Scanner* scanner() const { return scanner_; }
+ int position() { return scanner_->location().beg_pos; }
+ int peek_position() { return scanner_->peek_location().beg_pos; }
+ bool stack_overflow() const { return stack_overflow_; }
+ void set_stack_overflow() { stack_overflow_ = true; }
+
+ INLINE(Token::Value peek()) {
+ if (stack_overflow_) return Token::ILLEGAL;
+ return scanner()->peek();
+ }
+
+ INLINE(Token::Value Next()) {
+ if (stack_overflow_) return Token::ILLEGAL;
+ {
+ int marker;
+ if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) {
+ // Any further calls to Next or peek will return the illegal token.
+ // The current call must return the next token, which might already
+ // have been peek'ed.
+ stack_overflow_ = true;
+ }
+ }
+ return scanner()->Next();
+ }
+
+ void Consume(Token::Value token) {
+ Token::Value next = Next();
+ USE(next);
+ USE(token);
+ ASSERT(next == token);
+ }
+
+ bool Check(Token::Value token) {
+ Token::Value next = peek();
+ if (next == token) {
+ Consume(next);
+ return true;
+ }
+ return false;
+ }
+
+ void Expect(Token::Value token, bool* ok) {
+ Token::Value next = Next();
+ if (next != token) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ }
+ }
+
+ bool peek_any_identifier();
+ void ExpectSemicolon(bool* ok);
+ bool CheckContextualKeyword(Vector<const char> keyword);
+ void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
+
+ // Strict mode octal literal validation.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+
+ // Determine precedence of given token.
+ static int Precedence(Token::Value token, bool accept_IN);
+
+ // Report syntax errors.
+ virtual void ReportUnexpectedToken(Token::Value token) = 0;
+ virtual void ReportMessageAt(Scanner::Location loc, const char* type) = 0;
+
+ // Used to detect duplicates in object literals. Each of the values
+ // kGetterProperty, kSetterProperty and kValueProperty represents
+ // a type of object literal property. When parsing a property, its
+ // type value is stored in the DuplicateFinder for the property name.
+ // Values are chosen so that having intersecting bits means that there is
+ // an incompatibility.
+ // I.e., you can add a getter to a property that already has a setter, since
+ // kGetterProperty and kSetterProperty don't intersect, but not if it
+ // already has a getter or a value. Adding the getter to an existing
+ // setter will store the value (kGetterProperty | kSetterProperty), which
+ // is incompatible with adding any further properties.
+ enum PropertyKind {
+ kNone = 0,
+ // Bit patterns representing different object literal property types.
+ kGetterProperty = 1,
+ kSetterProperty = 2,
+ kValueProperty = 7,
+ // Helper constants.
+ kValueFlag = 4
+ };
+
+ // Validation per ECMA 262 - 11.1.5 "Object Initialiser".
+ class ObjectLiteralChecker {
+ public:
+ ObjectLiteralChecker(ParserBase* parser, LanguageMode mode)
+ : parser_(parser),
+ finder_(scanner()->unicode_cache()),
+ language_mode_(mode) { }
+
+ void CheckProperty(Token::Value property, PropertyKind type, bool* ok);
+
+ private:
+ ParserBase* parser() const { return parser_; }
+ Scanner* scanner() const { return parser_->scanner(); }
+
+ // Checks the type of conflict based on values coming from PropertyType.
+ bool HasConflict(PropertyKind type1, PropertyKind type2) {
+ return (type1 & type2) != 0;
+ }
+ bool IsDataDataConflict(PropertyKind type1, PropertyKind type2) {
+ return ((type1 & type2) & kValueFlag) != 0;
+ }
+ bool IsDataAccessorConflict(PropertyKind type1, PropertyKind type2) {
+ return ((type1 ^ type2) & kValueFlag) != 0;
+ }
+ bool IsAccessorAccessorConflict(PropertyKind type1, PropertyKind type2) {
+ return ((type1 | type2) & kValueFlag) == 0;
+ }
+
+ ParserBase* parser_;
+ DuplicateFinder finder_;
+ LanguageMode language_mode_;
+ };
+
+ private:
+ Scanner* scanner_;
+ uintptr_t stack_limit_;
+ bool stack_overflow_;
+
+ bool allow_lazy_;
+ bool allow_natives_syntax_;
+ bool allow_generators_;
+ bool allow_for_of_;
+};
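
Next() above detects imminent stack overflow without any platform-specific probe: the address of a fresh local approximates the stack pointer, so comparing it against a precomputed limit bounds recursion depth. A minimal sketch of the same trick, assuming a downward-growing stack as on the platforms V8 targets (helper names are hypothetical):

    #include <cstdint>
    #include <cstdio>

    // A limit some fixed headroom below the current stack top.
    static uintptr_t ComputeStackLimit(uintptr_t headroom_bytes) {
      int anchor;  // lives near the top of the current frame
      return reinterpret_cast<uintptr_t>(&anchor) - headroom_bytes;
    }

    // The probe from Next(): a fresh local sits near the current stack
    // pointer, so dipping below the limit means overflow is close.
    static bool NearStackLimit(uintptr_t limit) {
      int marker;
      return reinterpret_cast<uintptr_t>(&marker) < limit;
    }

    int main() {
      uintptr_t limit = ComputeStackLimit(512 * 1024);  // 512KB in reserve
      std::printf("near limit: %s\n", NearStackLimit(limit) ? "yes" : "no");
    }

Once the probe fires, Next() and peek() pin the token stream to Token::ILLEGAL, which is how PreParseProgram later turns the condition into kPreParseStackOverflow.
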
-typedef uint8_t byte;
// Preparsing checks a JavaScript program and emits preparse-data that helps
// a later parsing to be faster.
@@ -54,104 +218,25 @@ typedef uint8_t byte;
// rather it is to speed up properly written and correct programs.
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-
-namespace i = v8::internal;
-
-class DuplicateFinder {
- public:
- explicit DuplicateFinder(i::UnicodeCache* constants)
- : unicode_constants_(constants),
- backing_store_(16),
- map_(&Match) { }
-
- int AddAsciiSymbol(i::Vector<const char> key, int value);
- int AddUtf16Symbol(i::Vector<const uint16_t> key, int value);
- // Add a a number literal by converting it (if necessary)
- // to the string that ToString(ToNumber(literal)) would generate.
- // and then adding that string with AddAsciiSymbol.
- // This string is the actual value used as key in an object literal,
- // and the one that must be different from the other keys.
- int AddNumber(i::Vector<const char> key, int value);
-
- private:
- int AddSymbol(i::Vector<const byte> key, bool is_ascii, int value);
- // Backs up the key and its length in the backing store.
- // The backup is stored with a base 127 encoding of the
- // length (plus a bit saying whether the string is ASCII),
- // followed by the bytes of the key.
- byte* BackupKey(i::Vector<const byte> key, bool is_ascii);
-
- // Compare two encoded keys (both pointing into the backing store)
- // for having the same base-127 encoded lengths and ASCII-ness,
- // and then having the same 'length' bytes following.
- static bool Match(void* first, void* second);
- // Creates a hash from a sequence of bytes.
- static uint32_t Hash(i::Vector<const byte> key, bool is_ascii);
- // Checks whether a string containing a JS number is its canonical
- // form.
- static bool IsNumberCanonical(i::Vector<const char> key);
-
- // Size of buffer. Sufficient for using it to call DoubleToCString in
- // from conversions.h.
- static const int kBufferSize = 100;
-
- i::UnicodeCache* unicode_constants_;
- // Backing store used to store strings used as hashmap keys.
- i::SequenceCollector<unsigned char> backing_store_;
- i::HashMap map_;
- // Buffer used for string->number->canonical string conversions.
- char number_buffer_[kBufferSize];
-};
-
-
-class PreParser {
+class PreParser : public ParserBase {
public:
enum PreParseResult {
kPreParseStackOverflow,
kPreParseSuccess
};
-
- PreParser(i::Scanner* scanner,
- i::ParserRecorder* log,
+ PreParser(Scanner* scanner,
+ ParserRecorder* log,
uintptr_t stack_limit)
- : scanner_(scanner),
+ : ParserBase(scanner, stack_limit),
log_(log),
scope_(NULL),
- stack_limit_(stack_limit),
- strict_mode_violation_location_(i::Scanner::Location::invalid()),
+ strict_mode_violation_location_(Scanner::Location::invalid()),
strict_mode_violation_type_(NULL),
- stack_overflow_(false),
- allow_lazy_(false),
- allow_natives_syntax_(false),
- allow_generators_(false),
- allow_for_of_(false),
parenthesized_function_(false) { }
~PreParser() {}
- bool allow_natives_syntax() const { return allow_natives_syntax_; }
- bool allow_lazy() const { return allow_lazy_; }
- bool allow_modules() const { return scanner_->HarmonyModules(); }
- bool allow_harmony_scoping() const { return scanner_->HarmonyScoping(); }
- bool allow_generators() const { return allow_generators_; }
- bool allow_for_of() const { return allow_for_of_; }
- bool allow_harmony_numeric_literals() const {
- return scanner_->HarmonyNumericLiterals();
- }
-
- void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
- void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
- void set_allow_modules(bool allow) { scanner_->SetHarmonyModules(allow); }
- void set_allow_harmony_scoping(bool allow) {
- scanner_->SetHarmonyScoping(allow);
- }
- void set_allow_generators(bool allow) { allow_generators_ = allow; }
- void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
- void set_allow_harmony_numeric_literals(bool allow) {
- scanner_->SetHarmonyNumericLiterals(allow);
- }
-
// Pre-parse the program from the character stream; returns true on
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
@@ -159,13 +244,13 @@ class PreParser {
PreParseResult PreParseProgram() {
Scope top_scope(&scope_, kTopLevelScope);
bool ok = true;
- int start_position = scanner_->peek_location().beg_pos;
- ParseSourceElements(i::Token::EOS, &ok);
- if (stack_overflow_) return kPreParseStackOverflow;
+ int start_position = scanner()->peek_location().beg_pos;
+ ParseSourceElements(Token::EOS, &ok);
+ if (stack_overflow()) return kPreParseStackOverflow;
if (!ok) {
- ReportUnexpectedToken(scanner_->current_token());
+ ReportUnexpectedToken(scanner()->current_token());
} else if (!scope_->is_classic_mode()) {
- CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
+ CheckOctalLiteral(start_position, scanner()->location().end_pos, &ok);
}
return kPreParseSuccess;
}
@@ -178,50 +263,11 @@ class PreParser {
// keyword and parameters, and have consumed the initial '{'.
// At return, unless an error occurred, the scanner is positioned before the
  // final '}'.
- PreParseResult PreParseLazyFunction(i::LanguageMode mode,
+ PreParseResult PreParseLazyFunction(LanguageMode mode,
bool is_generator,
- i::ParserRecorder* log);
+ ParserRecorder* log);
private:
- // Used to detect duplicates in object literals. Each of the values
- // kGetterProperty, kSetterProperty and kValueProperty represents
- // a type of object literal property. When parsing a property, its
- // type value is stored in the DuplicateFinder for the property name.
- // Values are chosen so that having intersection bits means the there is
- // an incompatibility.
- // I.e., you can add a getter to a property that already has a setter, since
- // kGetterProperty and kSetterProperty doesn't intersect, but not if it
- // already has a getter or a value. Adding the getter to an existing
- // setter will store the value (kGetterProperty | kSetterProperty), which
- // is incompatible with adding any further properties.
- enum PropertyType {
- kNone = 0,
- // Bit patterns representing different object literal property types.
- kGetterProperty = 1,
- kSetterProperty = 2,
- kValueProperty = 7,
- // Helper constants.
- kValueFlag = 4
- };
-
- // Checks the type of conflict based on values coming from PropertyType.
- bool HasConflict(int type1, int type2) { return (type1 & type2) != 0; }
- bool IsDataDataConflict(int type1, int type2) {
- return ((type1 & type2) & kValueFlag) != 0;
- }
- bool IsDataAccessorConflict(int type1, int type2) {
- return ((type1 ^ type2) & kValueFlag) != 0;
- }
- bool IsAccessorAccessorConflict(int type1, int type2) {
- return ((type1 | type2) & kValueFlag) == 0;
- }
-
-
- void CheckDuplicate(DuplicateFinder* finder,
- i::Token::Value property,
- int type,
- bool* ok);
-
// These types form an algebra over syntactic categories that is just
// rich enough to let us recognize and propagate the constructs that
// are either being counted in the preparser data, or is important
@@ -441,7 +487,7 @@ class PreParser {
}
bool IsStringLiteral() {
- return code_ != kUnknownStatement;
+ return code_ == kStringLiteralExpressionStatement;
}
bool IsUseStrictLiteral() {
@@ -480,7 +526,7 @@ class PreParser {
expected_properties_(0),
with_nesting_count_(0),
language_mode_(
- (prev_ != NULL) ? prev_->language_mode() : i::CLASSIC_MODE),
+ (prev_ != NULL) ? prev_->language_mode() : CLASSIC_MODE),
is_generator_(false) {
*variable = this;
}
@@ -494,12 +540,12 @@ class PreParser {
bool is_generator() { return is_generator_; }
void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
bool is_classic_mode() {
- return language_mode_ == i::CLASSIC_MODE;
+ return language_mode_ == CLASSIC_MODE;
}
- i::LanguageMode language_mode() {
+ LanguageMode language_mode() {
return language_mode_;
}
- void set_language_mode(i::LanguageMode language_mode) {
+ void set_language_mode(LanguageMode language_mode) {
language_mode_ = language_mode;
}
@@ -523,13 +569,16 @@ class PreParser {
int materialized_literal_count_;
int expected_properties_;
int with_nesting_count_;
- i::LanguageMode language_mode_;
+ LanguageMode language_mode_;
bool is_generator_;
};
// Report syntax error
- void ReportUnexpectedToken(i::Token::Value token);
- void ReportMessageAt(i::Scanner::Location location,
+ void ReportUnexpectedToken(Token::Value token);
+ void ReportMessageAt(Scanner::Location location, const char* type) {
+ ReportMessageAt(location, type, NULL);
+ }
+ void ReportMessageAt(Scanner::Location location,
const char* type,
const char* name_opt) {
log_->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
@@ -541,8 +590,6 @@ class PreParser {
log_->LogMessage(start_pos, end_pos, type, name_opt);
}
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
-
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -606,87 +653,40 @@ class PreParser {
// Log the currently parsed string literal.
Expression GetStringSymbol();
- i::Token::Value peek() {
- if (stack_overflow_) return i::Token::ILLEGAL;
- return scanner_->peek();
- }
-
- i::Token::Value Next() {
- if (stack_overflow_) return i::Token::ILLEGAL;
- {
- int marker;
- if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) {
- // Further calls to peek/Next will return illegal token.
- // The current one will still be returned. It might already
- // have been seen using peek.
- stack_overflow_ = true;
- }
- }
- return scanner_->Next();
- }
-
- bool peek_any_identifier();
-
- void set_language_mode(i::LanguageMode language_mode) {
+ void set_language_mode(LanguageMode language_mode) {
scope_->set_language_mode(language_mode);
}
bool is_classic_mode() {
- return scope_->language_mode() == i::CLASSIC_MODE;
+ return scope_->language_mode() == CLASSIC_MODE;
}
bool is_extended_mode() {
- return scope_->language_mode() == i::EXTENDED_MODE;
+ return scope_->language_mode() == EXTENDED_MODE;
}
- i::LanguageMode language_mode() { return scope_->language_mode(); }
-
- void Consume(i::Token::Value token) { Next(); }
-
- void Expect(i::Token::Value token, bool* ok) {
- if (Next() != token) {
- *ok = false;
- }
- }
-
- bool Check(i::Token::Value token) {
- i::Token::Value next = peek();
- if (next == token) {
- Consume(next);
- return true;
- }
- return false;
- }
- void ExpectSemicolon(bool* ok);
+ LanguageMode language_mode() { return scope_->language_mode(); }
bool CheckInOrOf(bool accept_OF);
- static int Precedence(i::Token::Value tok, bool accept_IN);
-
- void SetStrictModeViolation(i::Scanner::Location,
+ void SetStrictModeViolation(Scanner::Location,
const char* type,
bool* ok);
void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
- void StrictModeIdentifierViolation(i::Scanner::Location,
+ void StrictModeIdentifierViolation(Scanner::Location,
const char* eval_args_type,
Identifier identifier,
bool* ok);
- i::Scanner* scanner_;
- i::ParserRecorder* log_;
+ ParserRecorder* log_;
Scope* scope_;
- uintptr_t stack_limit_;
- i::Scanner::Location strict_mode_violation_location_;
+ Scanner::Location strict_mode_violation_location_;
const char* strict_mode_violation_type_;
- bool stack_overflow_;
- bool allow_lazy_;
- bool allow_natives_syntax_;
- bool allow_generators_;
- bool allow_for_of_;
bool parenthesized_function_;
};
-} } // v8::preparser
+
+} } // v8::internal
#endif // V8_PREPARSER_H
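
The PropertyKind constants hoisted into ParserBase are chosen so that each conflict predicate is a single bit operation: only data properties carry kValueFlag (bit 4 of kValueProperty == 7), so AND, XOR and OR against that flag separate data/data, data/accessor and accessor/accessor collisions. A compilable walkthrough of the algebra, restating just the constants and predicates from the header above:

    #include <cassert>

    enum PropertyKind {
      kNone = 0,
      kGetterProperty = 1,
      kSetterProperty = 2,
      kValueProperty = 7,
      kValueFlag = 4
    };

    static bool HasConflict(int a, int b) { return (a & b) != 0; }
    static bool IsDataDataConflict(int a, int b) {
      return ((a & b) & kValueFlag) != 0;
    }
    static bool IsDataAccessorConflict(int a, int b) {
      return ((a ^ b) & kValueFlag) != 0;
    }
    static bool IsAccessorAccessorConflict(int a, int b) {
      return ((a | b) & kValueFlag) == 0;
    }

    int main() {
      // A getter after a setter: the bits don't intersect, no conflict.
      assert(!HasConflict(kSetterProperty, kGetterProperty));
      // A second data property: data/data (an error only in strict mode).
      assert(IsDataDataConflict(kValueProperty, kValueProperty));
      // Data then accessor: the kValueFlag bits differ, data/accessor.
      assert(HasConflict(kValueProperty, kGetterProperty));
      assert(IsDataAccessorConflict(kValueProperty, kGetterProperty));
      // Two getters: neither side carries kValueFlag, accessor/accessor.
      assert(HasConflict(kGetterProperty, kGetterProperty));
      assert(IsAccessorAccessorConflict(kGetterProperty, kGetterProperty));
      return 0;
    }

Because the DuplicateFinder ORs each new kind into the stored value, a completed accessor pair accumulates to kGetterProperty | kSetterProperty == 3, which still lacks kValueFlag; a later data property under the same name is therefore reported as a data/accessor conflict.
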
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index b1bac4cd4a..4b441b9ae4 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -200,11 +200,25 @@ void PrettyPrinter::VisitSwitchStatement(SwitchStatement* node) {
Print(") { ");
ZoneList<CaseClause*>* cases = node->cases();
for (int i = 0; i < cases->length(); i++)
- PrintCaseClause(cases->at(i));
+ Visit(cases->at(i));
Print("}");
}
+void PrettyPrinter::VisitCaseClause(CaseClause* clause) {
+ if (clause->is_default()) {
+ Print("default");
+ } else {
+ Print("case ");
+ Visit(clause->label());
+ }
+ Print(": ");
+ PrintStatements(clause->statements());
+ if (clause->statements()->length() > 0)
+ Print(" ");
+}
+
+
void PrettyPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
PrintLabels(node->labels());
Print("do ");
@@ -297,10 +311,9 @@ void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
}
-void PrettyPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
+void PrettyPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
Print("(");
- PrintLiteral(node->shared_function_info(), true);
+ PrintLiteral(node->name(), false);
Print(")");
}
@@ -621,20 +634,6 @@ void PrettyPrinter::PrintFunctionLiteral(FunctionLiteral* function) {
}
-void PrettyPrinter::PrintCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- Print("default");
- } else {
- Print("case ");
- Visit(clause->label());
- }
- Print(": ");
- PrintStatements(clause->statements());
- if (clause->statements()->length() > 0)
- Print(" ");
-}
-
-
//-----------------------------------------------------------------------------
class IndentedScope BASE_EMBEDDED {
@@ -762,18 +761,6 @@ void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
}
-void AstPrinter::PrintCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- IndentedScope indent(this, "DEFAULT");
- PrintStatements(clause->statements());
- } else {
- IndentedScope indent(this, "CASE");
- Visit(clause->label());
- PrintStatements(clause->statements());
- }
-}
-
-
void AstPrinter::VisitBlock(Block* node) {
const char* block_txt = node->is_initializer_block() ? "BLOCK INIT" : "BLOCK";
IndentedScope indent(this, block_txt);
@@ -901,7 +888,19 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
PrintLabelsIndented(node->labels());
PrintIndentedVisit("TAG", node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
- PrintCaseClause(node->cases()->at(i));
+ Visit(node->cases()->at(i));
+ }
+}
+
+
+void AstPrinter::VisitCaseClause(CaseClause* clause) {
+ if (clause->is_default()) {
+ IndentedScope indent(this, "DEFAULT");
+ PrintStatements(clause->statements());
+ } else {
+ IndentedScope indent(this, "CASE");
+ Visit(clause->label());
+ PrintStatements(clause->statements());
}
}
@@ -982,10 +981,9 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
}
-void AstPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- IndentedScope indent(this, "FUNC LITERAL");
- PrintLiteralIndented("SHARED INFO", node->shared_function_info(), true);
+void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
+ IndentedScope indent(this, "NATIVE FUNC LITERAL");
+ PrintLiteralIndented("NAME", node->name(), false);
}
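
Both printers lose their PrintCaseClause helpers because this commit promotes CaseClause to a full AST node reached through ordinary visitor dispatch: the switch loops now call Visit(cases->at(i)), which lands in each printer's own VisitCaseClause. A minimal double-dispatch sketch of that shape; the classes here are illustrative stand-ins, not V8's real AST:

    #include <cstdio>

    class CaseClause;

    class AstVisitor {
     public:
      virtual ~AstVisitor() {}
      virtual void VisitCaseClause(CaseClause* clause) = 0;
    };

    class AstNode {
     public:
      virtual ~AstNode() {}
      virtual void Accept(AstVisitor* v) = 0;
    };

    class CaseClause : public AstNode {
     public:
      explicit CaseClause(bool is_default) : is_default_(is_default) {}
      bool is_default() const { return is_default_; }
      virtual void Accept(AstVisitor* v) { v->VisitCaseClause(this); }
     private:
      bool is_default_;
    };

    class PrettyPrinter : public AstVisitor {
     public:
      void Visit(AstNode* node) { node->Accept(this); }  // double dispatch
      virtual void VisitCaseClause(CaseClause* clause) {
        std::printf("%s", clause->is_default() ? "default: " : "case ...: ");
      }
    };

    int main() {
      CaseClause clause(true);
      PrettyPrinter printer;
      printer.Visit(&clause);  // resolves to VisitCaseClause at runtime
    }

The payoff is visible in rewriter.cc below: once CaseClause is a visited node, every visitor must say something about it, even if only UNREACHABLE().
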
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index f2feb73fc9..e363f67761 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -33,27 +33,19 @@
namespace v8 {
namespace internal {
-const char* StringsStorage::GetFunctionName(Name* name) {
- return GetFunctionName(GetName(name));
-}
-
-
-const char* StringsStorage::GetFunctionName(const char* name) {
- return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName;
-}
-
-
CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
const char* name,
const char* name_prefix,
const char* resource_name,
- int line_number)
+ int line_number,
+ int column_number)
: tag_(tag),
builtin_id_(Builtins::builtin_count),
name_prefix_(name_prefix),
name_(name),
resource_name_(resource_name),
line_number_(line_number),
+ column_number_(column_number),
shared_id_(0),
script_id_(v8::Script::kNoScriptId),
no_frame_ranges_(NULL),
@@ -77,25 +69,6 @@ ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
children_(CodeEntriesMatch),
id_(tree->next_node_id()) { }
-
-CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
- switch (tag) {
- case GC:
- return gc_entry_;
- case JS:
- case COMPILER:
- // DOM events handlers are reported as OTHER / EXTERNAL entries.
- // To avoid confusing people, let's put all these entries into
- // one bucket.
- case OTHER:
- case EXTERNAL:
- return program_entry_;
- case IDLE:
- return idle_entry_;
- default: return NULL;
- }
-}
-
} } // namespace v8::internal
#endif // V8_PROFILE_GENERATOR_INL_H_
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 38c1f785d9..acf54da1c7 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -41,6 +41,12 @@ namespace v8 {
namespace internal {
+bool StringsStorage::StringsMatch(void* key1, void* key2) {
+ return strcmp(reinterpret_cast<char*>(key1),
+ reinterpret_cast<char*>(key2)) == 0;
+}
+
+
StringsStorage::StringsStorage(Heap* heap)
: hash_seed_(heap->HashSeed()), names_(StringsMatch) {
}
@@ -57,12 +63,15 @@ StringsStorage::~StringsStorage() {
const char* StringsStorage::GetCopy(const char* src) {
int len = static_cast<int>(strlen(src));
- Vector<char> dst = Vector<char>::New(len + 1);
- OS::StrNCpy(dst, src, len);
- dst[len] = '\0';
- uint32_t hash =
- StringHasher::HashSequentialString(dst.start(), len, hash_seed_);
- return AddOrDisposeString(dst.start(), hash);
+ HashMap::Entry* entry = GetEntry(src, len);
+ if (entry->value == NULL) {
+ Vector<char> dst = Vector<char>::New(len + 1);
+ OS::StrNCpy(dst, src, len);
+ dst[len] = '\0';
+ entry->key = dst.start();
+ entry->value = entry->key;
+ }
+ return reinterpret_cast<const char*>(entry->value);
}
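
The new GetCopy consults the string table before allocating: GetEntry hashes the incoming characters, and only a missing entry triggers the copy, so repeated lookups of one name now share a single canonical pointer instead of allocating per call. The same interning idea expressed with the standard library rather than V8's HashMap, as a sketch of the lookup-then-copy behaviour only:

    #include <string>
    #include <unordered_set>

    class StringPool {
     public:
      // Returns a pool-owned copy of src; equal inputs yield the same
      // pointer, since std::unordered_set elements are address-stable.
      const char* GetCopy(const char* src) {
        return pool_.emplace(src).first->c_str();
      }
     private:
      std::unordered_set<std::string> pool_;
    };

    int main() {
      StringPool pool;
      const char* a = pool.GetCopy("anonymous");
      const char* b = pool.GetCopy("anonymous");
      return a == b ? 0 : 1;  // one canonical copy
    }

Canonical pointers are what make the GetFunctionName/BeautifyFunctionName helpers above cheap: they can hand back either the interned name or a shared constant without further copying.
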
@@ -75,15 +84,16 @@ const char* StringsStorage::GetFormatted(const char* format, ...) {
}
-const char* StringsStorage::AddOrDisposeString(char* str, uint32_t hash) {
- HashMap::Entry* cache_entry = names_.Lookup(str, hash, true);
- if (cache_entry->value == NULL) {
+const char* StringsStorage::AddOrDisposeString(char* str, int len) {
+ HashMap::Entry* entry = GetEntry(str, len);
+ if (entry->value == NULL) {
// New entry added.
- cache_entry->value = str;
+ entry->key = str;
+ entry->value = str;
} else {
DeleteArray(str);
}
- return reinterpret_cast<const char*>(cache_entry->value);
+ return reinterpret_cast<const char*>(entry->value);
}
@@ -92,11 +102,9 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
int len = OS::VSNPrintF(str, format, args);
if (len == -1) {
DeleteArray(str.start());
- return format;
+ return GetCopy(format);
}
- uint32_t hash = StringHasher::HashSequentialString(
- str.start(), len, hash_seed_);
- return AddOrDisposeString(str.start(), hash);
+ return AddOrDisposeString(str.start(), len);
}
@@ -104,11 +112,11 @@ const char* StringsStorage::GetName(Name* name) {
if (name->IsString()) {
String* str = String::cast(name);
int length = Min(kMaxNameSize, str->length());
+ int actual_length = 0;
SmartArrayPointer<char> data =
- str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
- uint32_t hash = StringHasher::HashSequentialString(
- *data, length, name->GetHeap()->HashSeed());
- return AddOrDisposeString(data.Detach(), hash);
+ str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length,
+ &actual_length);
+ return AddOrDisposeString(data.Detach(), actual_length);
} else if (name->IsSymbol()) {
return "<symbol>";
}
@@ -121,6 +129,21 @@ const char* StringsStorage::GetName(int index) {
}
+const char* StringsStorage::GetFunctionName(Name* name) {
+ return BeautifyFunctionName(GetName(name));
+}
+
+
+const char* StringsStorage::GetFunctionName(const char* name) {
+ return BeautifyFunctionName(GetCopy(name));
+}
+
+
+const char* StringsStorage::BeautifyFunctionName(const char* name) {
+ return (*name == 0) ? ProfileGenerator::kAnonymousFunctionName : name;
+}
+
+
size_t StringsStorage::GetUsedMemorySize() const {
size_t size = sizeof(*this);
size += sizeof(HashMap::Entry) * names_.capacity();
@@ -131,6 +154,12 @@ size_t StringsStorage::GetUsedMemorySize() const {
}
+HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
+ uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
+ return names_.Lookup(const_cast<char*>(str), hash, true);
+}
+
+
const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
@@ -141,15 +170,6 @@ CodeEntry::~CodeEntry() {
}
-void CodeEntry::CopyData(const CodeEntry& source) {
- tag_ = source.tag_;
- name_prefix_ = source.name_prefix_;
- name_ = source.name_;
- resource_name_ = source.resource_name_;
- line_number_ = source.line_number_;
-}
-
-
uint32_t CodeEntry::GetCallUid() const {
uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
if (shared_id_ != 0) {
@@ -546,12 +566,14 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(
const char* name,
const char* name_prefix,
const char* resource_name,
- int line_number) {
+ int line_number,
+ int column_number) {
CodeEntry* code_entry = new CodeEntry(tag,
name,
name_prefix,
resource_name,
- line_number);
+ line_number,
+ column_number);
code_entries_.Add(code_entry);
return code_entry;
}
@@ -660,4 +682,22 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
+CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
+ switch (tag) {
+ case GC:
+ return gc_entry_;
+ case JS:
+ case COMPILER:
+ // DOM event handlers are reported as OTHER / EXTERNAL entries.
+ // To avoid confusing people, let's put all these entries into
+ // one bucket.
+ case OTHER:
+ case EXTERNAL:
+ return program_entry_;
+ case IDLE:
+ return idle_entry_;
+ default: return NULL;
+ }
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 0a4502cc1b..6e4758bece 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -49,20 +49,18 @@ class StringsStorage {
const char* GetVFormatted(const char* format, va_list args);
const char* GetName(Name* name);
const char* GetName(int index);
- inline const char* GetFunctionName(Name* name);
- inline const char* GetFunctionName(const char* name);
+ const char* GetFunctionName(Name* name);
+ const char* GetFunctionName(const char* name);
size_t GetUsedMemorySize() const;
private:
static const int kMaxNameSize = 1024;
- INLINE(static bool StringsMatch(void* key1, void* key2)) {
- return strcmp(reinterpret_cast<char*>(key1),
- reinterpret_cast<char*>(key2)) == 0;
- }
- const char* AddOrDisposeString(char* str, uint32_t hash);
+ static bool StringsMatch(void* key1, void* key2);
+ const char* BeautifyFunctionName(const char* name);
+ const char* AddOrDisposeString(char* str, int len);
+ HashMap::Entry* GetEntry(const char* str, int len);
- // Mapping of strings by String::Hash to const char* strings.
uint32_t hash_seed_;
HashMap names_;
@@ -73,28 +71,30 @@ class StringsStorage {
class CodeEntry {
public:
// CodeEntry doesn't own name strings, just references them.
- INLINE(CodeEntry(Logger::LogEventsAndTags tag,
+ inline CodeEntry(Logger::LogEventsAndTags tag,
const char* name,
const char* name_prefix = CodeEntry::kEmptyNamePrefix,
const char* resource_name = CodeEntry::kEmptyResourceName,
- int line_number = v8::CpuProfileNode::kNoLineNumberInfo));
+ int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
+ int column_number = v8::CpuProfileNode::kNoColumnNumberInfo);
~CodeEntry();
- INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
- INLINE(const char* name_prefix() const) { return name_prefix_; }
- INLINE(bool has_name_prefix() const) { return name_prefix_[0] != '\0'; }
- INLINE(const char* name() const) { return name_; }
- INLINE(const char* resource_name() const) { return resource_name_; }
- INLINE(int line_number() const) { return line_number_; }
- INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
- INLINE(int script_id() const) { return script_id_; }
- INLINE(void set_script_id(int script_id)) { script_id_ = script_id; }
- INLINE(void set_bailout_reason(const char* bailout_reason)) {
+ bool is_js_function() const { return is_js_function_tag(tag_); }
+ const char* name_prefix() const { return name_prefix_; }
+ bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
+ const char* name() const { return name_; }
+ const char* resource_name() const { return resource_name_; }
+ int line_number() const { return line_number_; }
+ int column_number() const { return column_number_; }
+ void set_shared_id(int shared_id) { shared_id_ = shared_id; }
+ int script_id() const { return script_id_; }
+ void set_script_id(int script_id) { script_id_ = script_id; }
+ void set_bailout_reason(const char* bailout_reason) {
bailout_reason_ = bailout_reason;
}
- INLINE(const char* bailout_reason() const) { return bailout_reason_; }
+ const char* bailout_reason() const { return bailout_reason_; }
- INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
+ static inline bool is_js_function_tag(Logger::LogEventsAndTags tag);
List<OffsetRange>* no_frame_ranges() const { return no_frame_ranges_; }
void set_no_frame_ranges(List<OffsetRange>* ranges) {
@@ -104,7 +104,6 @@ class CodeEntry {
void SetBuiltinId(Builtins::Name id);
Builtins::Name builtin_id() const { return builtin_id_; }
- void CopyData(const CodeEntry& source);
uint32_t GetCallUid() const;
bool IsSameAs(CodeEntry* entry) const;
@@ -119,6 +118,7 @@ class CodeEntry {
const char* name_;
const char* resource_name_;
int line_number_;
+ int column_number_;
int shared_id_;
int script_id_;
List<OffsetRange>* no_frame_ranges_;
@@ -132,27 +132,27 @@ class ProfileTree;
class ProfileNode {
public:
- INLINE(ProfileNode(ProfileTree* tree, CodeEntry* entry));
+ inline ProfileNode(ProfileTree* tree, CodeEntry* entry);
ProfileNode* FindChild(CodeEntry* entry);
ProfileNode* FindOrAddChild(CodeEntry* entry);
- INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
- INLINE(void IncreaseSelfTicks(unsigned amount)) { self_ticks_ += amount; }
+ void IncrementSelfTicks() { ++self_ticks_; }
+ void IncreaseSelfTicks(unsigned amount) { self_ticks_ += amount; }
- INLINE(CodeEntry* entry() const) { return entry_; }
- INLINE(unsigned self_ticks() const) { return self_ticks_; }
- INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
+ CodeEntry* entry() const { return entry_; }
+ unsigned self_ticks() const { return self_ticks_; }
+ const List<ProfileNode*>* children() const { return &children_list_; }
unsigned id() const { return id_; }
void Print(int indent);
private:
- INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
+ static bool CodeEntriesMatch(void* entry1, void* entry2) {
return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs(
reinterpret_cast<CodeEntry*>(entry2));
}
- INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
+ static uint32_t CodeEntryHash(CodeEntry* entry) {
return entry->GetCallUid();
}
@@ -304,7 +304,8 @@ class CpuProfilesCollection {
const char* name,
const char* name_prefix = CodeEntry::kEmptyNamePrefix,
const char* resource_name = CodeEntry::kEmptyResourceName,
- int line_number = v8::CpuProfileNode::kNoLineNumberInfo);
+ int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
+ int column_number = v8::CpuProfileNode::kNoColumnNumberInfo);
// Called from profile generator thread.
void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
@@ -331,7 +332,7 @@ class ProfileGenerator {
void RecordTickSample(const TickSample& sample);
- INLINE(CodeMap* code_map()) { return &code_map_; }
+ CodeMap* code_map() { return &code_map_; }
static const char* const kAnonymousFunctionName;
static const char* const kProgramEntryName;
@@ -342,7 +343,7 @@ class ProfileGenerator {
static const char* const kUnresolvedFunctionName;
private:
- INLINE(CodeEntry* EntryForVMState(StateTag tag));
+ CodeEntry* EntryForVMState(StateTag tag);
CpuProfilesCollection* profiles_;
CodeMap code_map_;
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 7f44b79277..659fbd1da6 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -82,6 +82,7 @@ class Representation {
public:
enum Kind {
kNone,
+ kByte,
kSmi,
kInteger32,
kDouble,
@@ -95,6 +96,7 @@ class Representation {
static Representation None() { return Representation(kNone); }
static Representation Tagged() { return Representation(kTagged); }
+ static Representation Byte() { return Representation(kByte); }
static Representation Smi() { return Representation(kSmi); }
static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
@@ -139,6 +141,7 @@ class Representation {
Kind kind() const { return static_cast<Kind>(kind_); }
bool IsNone() const { return kind_ == kNone; }
+ bool IsByte() const { return kind_ == kByte; }
bool IsTagged() const { return kind_ == kTagged; }
bool IsSmi() const { return kind_ == kSmi; }
bool IsSmiOrTagged() const { return IsSmi() || IsTagged(); }
@@ -148,7 +151,7 @@ class Representation {
bool IsHeapObject() const { return kind_ == kHeapObject; }
bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
- return kind_ == kInteger32 || kind_ == kDouble || kind_ == kSmi;
+ return IsByte() || IsSmi() || IsInteger32() || IsDouble();
}
const char* Mnemonic() const;
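
kByte joins the Representation lattice here and, through the rewritten predicate, now counts as a specialization alongside Smi, Integer32 and Double. A reduced, compilable copy of the class, just enough to exercise the updated IsSpecialization() (the real class carries more kinds, factories and helpers):

    #include <cassert>

    class Representation {
     public:
      enum Kind { kNone, kByte, kSmi, kInteger32, kDouble, kHeapObject,
                  kTagged, kExternal };
      static Representation Byte() { return Representation(kByte); }
      static Representation Smi() { return Representation(kSmi); }
      static Representation Tagged() { return Representation(kTagged); }
      bool IsByte() const { return kind_ == kByte; }
      bool IsSmi() const { return kind_ == kSmi; }
      bool IsInteger32() const { return kind_ == kInteger32; }
      bool IsDouble() const { return kind_ == kDouble; }
      bool IsSpecialization() const {
        return IsByte() || IsSmi() || IsInteger32() || IsDouble();
      }
     private:
      explicit Representation(Kind kind) : kind_(kind) {}
      Kind kind_;
    };

    int main() {
      assert(Representation::Byte().IsSpecialization());  // new in this commit
      assert(Representation::Smi().IsSpecialization());
      assert(!Representation::Tagged().IsSpecialization());
    }
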
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index de9be50ddc..4c03f21538 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -40,7 +40,7 @@ function ProxyCreate(handler, proto) {
throw MakeTypeError("handler_non_object", ["create"])
if (IS_UNDEFINED(proto))
proto = null
- else if (!(IS_SPEC_OBJECT(proto) || proto === null))
+ else if (!(IS_SPEC_OBJECT(proto) || IS_NULL(proto)))
throw MakeTypeError("proto_non_object", ["create"])
return %CreateJSProxy(handler, proto)
}
@@ -56,7 +56,7 @@ function ProxyCreateFunction(handler, callTrap, constructTrap) {
// Make sure the trap receives 'undefined' as this.
var construct = constructTrap
constructTrap = function() {
- return %Apply(construct, void 0, arguments, 0, %_ArgumentsLength());
+ return %Apply(construct, UNDEFINED, arguments, 0, %_ArgumentsLength());
}
} else {
throw MakeTypeError("trap_function_expected",
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index cb11ad107c..22b08775b3 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -189,7 +189,7 @@ function RegExpExec(string) {
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
- if (matchIndices === null) {
+ if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
return null;
}
@@ -232,7 +232,7 @@ function RegExpTest(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
- if (matchIndices === null) {
+ if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
return false;
}
@@ -253,7 +253,7 @@ function RegExpTest(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
- if (matchIndices === null) {
+ if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
return false;
}
@@ -384,7 +384,7 @@ function RegExpMakeCaptureGetter(n) {
var lastMatchInfo = new InternalPackedArray(
2, // REGEXP_NUMBER_OF_CAPTURES
"", // Last subject.
- void 0, // Last input - settable with RegExpSetInput.
+ UNDEFINED, // Last input - settable with RegExpSetInput.
0, // REGEXP_FIRST_CAPTURE + 0
0 // REGEXP_FIRST_CAPTURE + 1
);
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 06335a80c7..70b362fd7d 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -207,6 +207,11 @@ void Processor::VisitSwitchStatement(SwitchStatement* node) {
}
+void Processor::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
void Processor::VisitContinueStatement(ContinueStatement* node) {
is_set_ = false;
}
@@ -271,13 +276,12 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
// eval('with ({x:1}) x = 1');
// the end position of the function generated for executing the eval code
// coincides with the end of the with scope which is the position of '1'.
- int position = function->end_position();
+ int pos = function->end_position();
VariableProxy* result_proxy = processor.factory()->NewVariableProxy(
- result->name(), false, result->interface(), position);
+ result->name(), false, result->interface(), pos);
result_proxy->BindTo(result);
Statement* result_statement =
- processor.factory()->NewReturnStatement(result_proxy);
- result_statement->set_statement_pos(position);
+ processor.factory()->NewReturnStatement(result_proxy, pos);
body->Add(result_statement, info->zone());
}
}
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 95dcc4f983..7c900b37d9 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -33,7 +33,6 @@
#include "bootstrapper.h"
#include "code-stubs.h"
#include "compilation-cache.h"
-#include "deoptimizer.h"
#include "execution.h"
#include "full-codegen.h"
#include "global-handles.h"
@@ -185,7 +184,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
PrintF("]\n");
}
- Deoptimizer::PatchInterruptCode(isolate_, shared->code());
+ BackEdgeTable::Patch(isolate_, shared->code());
}
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index c09fb1d499..15cfc854bf 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -31,6 +31,7 @@
#include "v8.h"
#include "accessors.h"
+#include "allocation-site-scopes.h"
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
@@ -348,10 +349,8 @@ MaybeObject* TransitionElements(Handle<Object> object,
ElementsKind from_kind =
Handle<JSObject>::cast(object)->map()->elements_kind();
if (Map::IsValidElementsTransition(from_kind, to_kind)) {
- Handle<Object> result = JSObject::TransitionElementsKind(
- Handle<JSObject>::cast(object), to_kind);
- if (result.is_null()) return isolate->ThrowIllegalOperation();
- return *result;
+ JSObject::TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
+ return *object;
}
return isolate->ThrowIllegalOperation();
}
@@ -488,44 +487,39 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- RETURN_IF_EMPTY_HANDLE(isolate, boilerplate);
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return JSObject::cast(*boilerplate)->DeepCopy(isolate);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_ARG_CHECKED(flags, 3);
- bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
+ Handle<Object> literal_site(literals->get(literals_index), isolate);
+ Handle<AllocationSite> site;
+ Handle<JSObject> boilerplate;
+ if (*literal_site == isolate->heap()->undefined_value()) {
+ Handle<Object> raw_boilerplate = CreateObjectLiteralBoilerplate(
+ isolate,
+ literals,
+ constant_properties,
+ should_have_fast_elements,
+ has_function_literal);
+ RETURN_IF_EMPTY_HANDLE(isolate, raw_boilerplate);
+ boilerplate = Handle<JSObject>::cast(raw_boilerplate);
+
+ AllocationSiteCreationContext creation_context(isolate);
+ site = creation_context.EnterNewScope();
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::DeepWalk(boilerplate, &creation_context));
+ creation_context.ExitScope(site, boilerplate);
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- RETURN_IF_EMPTY_HANDLE(isolate, boilerplate);
  // Memoize the allocation site in the function's literals array.
- literals->set(literals_index, *boilerplate);
+ literals->set(literals_index, *site);
+ } else {
+ site = Handle<AllocationSite>::cast(literal_site);
+ boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
+ isolate);
}
- return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
+
+ AllocationSiteUsageContext usage_context(isolate, site, true);
+ usage_context.EnterNewScope();
+ Handle<Object> copy = JSObject::DeepCopy(boilerplate, &usage_context);
+ usage_context.ExitScope(site, boilerplate);
+ RETURN_IF_EMPTY_HANDLE(isolate, copy);
+ return *copy;
}
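
The rewritten Runtime_CreateObjectLiteral changes what the literals slot memoizes: previously the boilerplate object itself, now an AllocationSite whose transition_info holds the boilerplate. The first execution builds the boilerplate and DeepWalks it under a creation context to lay down nested sites; every execution then returns a DeepCopy made under a usage context, keeping the copy linked to its site. A control-flow sketch with simplified stand-in types (handles, scopes and failure paths elided; the helpers are placeholders for the calls above):

    #include <cstdio>

    struct Boilerplate { int value; };
    struct AllocationSite { Boilerplate* transition_info; };

    // Placeholder helpers standing in for CreateObjectLiteralBoilerplate,
    // DeepWalk under an AllocationSiteCreationContext, and DeepCopy under
    // an AllocationSiteUsageContext.
    static Boilerplate* CreateBoilerplate() { return new Boilerplate{42}; }
    static AllocationSite* WalkAndCreateSites(Boilerplate* b) {
      return new AllocationSite{b};
    }
    static Boilerplate* DeepCopyWithSites(Boilerplate* b, AllocationSite*) {
      return new Boilerplate{b->value};
    }

    static Boilerplate* CreateObjectLiteral(AllocationSite** literal_slot) {
      AllocationSite* site = *literal_slot;
      Boilerplate* boilerplate;
      if (site == nullptr) {
        // First run: build the boilerplate, create its allocation sites,
        // and memoize the site in the literals slot.
        boilerplate = CreateBoilerplate();
        site = WalkAndCreateSites(boilerplate);
        *literal_slot = site;
      } else {
        // Later runs: the cached site's transition_info is the boilerplate.
        boilerplate = site->transition_info;
      }
      // Every run hands back a deep copy tied to the site.
      return DeepCopyWithSites(boilerplate, site);
    }

    int main() {
      AllocationSite* slot = nullptr;
      Boilerplate* first = CreateObjectLiteral(&slot);   // creates the site
      Boilerplate* second = CreateObjectLiteral(&slot);  // reuses the site
      std::printf("%d %d\n", first->value, second->value);
    }

GetLiteralAllocationSite in the next hunk applies the identical memoization to array literals, which is why both paths end in the same usage-context copy.
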
@@ -541,9 +535,16 @@ static Handle<AllocationSite> GetLiteralAllocationSite(
ASSERT(*elements != isolate->heap()->empty_fixed_array());
Handle<Object> boilerplate =
Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return site;
- site = isolate->factory()->NewAllocationSite();
- site->set_transition_info(*boilerplate);
+ if (boilerplate.is_null()) return Handle<AllocationSite>::null();
+
+ AllocationSiteCreationContext creation_context(isolate);
+ site = creation_context.EnterNewScope();
+ if (JSObject::DeepWalk(Handle<JSObject>::cast(boilerplate),
+ &creation_context).is_null()) {
+ return Handle<AllocationSite>::null();
+ }
+ creation_context.ExitScope(site, Handle<JSObject>::cast(boilerplate));
+
literals->set(literals_index, *site);
} else {
site = Handle<AllocationSite>::cast(literal_site);
@@ -564,8 +565,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
literals_index, elements);
RETURN_IF_EMPTY_HANDLE(isolate, site);
- JSObject* boilerplate = JSObject::cast(site->transition_info());
- return boilerplate->DeepCopy(isolate);
+ Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()));
+ AllocationSiteUsageContext usage_context(isolate, site, true);
+ usage_context.EnterNewScope();
+ Handle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context);
+ usage_context.ExitScope(site, boilerplate);
+ RETURN_IF_EMPTY_HANDLE(isolate, copy);
+ return *copy;
}
@@ -586,11 +592,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
isolate->counters()->cow_arrays_created_runtime()->Increment();
}
- AllocationSiteMode mode = AllocationSite::GetMode(
- boilerplate->GetElementsKind());
- if (mode == TRACK_ALLOCATION_SITE) {
- return isolate->heap()->CopyJSObjectWithAllocationSite(
- boilerplate, *site);
+ if (AllocationSite::GetMode(boilerplate->GetElementsKind()) ==
+ TRACK_ALLOCATION_SITE) {
+ return isolate->heap()->CopyJSObject(boilerplate, *site);
}
return isolate->heap()->CopyJSObject(boilerplate);
@@ -822,6 +826,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferSliceImpl) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferIsView) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, object, 0);
+ return object->IsJSArrayBufferView()
+ ? isolate->heap()->true_value()
+ : isolate->heap()->false_value();
+}
+
+
enum TypedArrayId {
  // arrayIds below should be synchronized with typedarray.js natives.
ARRAY_ID_UINT8 = 1,
@@ -954,17 +968,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
HandleVector<Object>(NULL, 0)));
}
+ // NOTE: not initializing backing store.
// We assume that the caller of this function will initialize holder
// with the loop
// for(i = 0; i < length; i++) { holder[i] = source[i]; }
+ // We assume that the caller of this function is always a typed array
+ // constructor.
// If source is a typed array, this loop will always run to completion,
// so we are sure that the backing store will be initialized.
- // Otherwise, we do not know (the indexing operation might throw).
- // Hence we require zero initialization unless our source is a typed array.
- bool should_zero_initialize = !source->IsJSTypedArray();
+ // Otherwise, the indexing operation might throw, so the loop will not
+ // run to completion and the typed array might remain partly initialized.
+ // However, we further assume that the caller of this function is a typed array
+ // constructor, and the exception will propagate out of the constructor,
+ // therefore uninitialized memory will not be accessible by a user program.
+ //
+ // TODO(dslomov): revise this once we support subclassing.
if (!Runtime::SetupArrayBufferAllocatingData(
- isolate, buffer, byte_length, should_zero_initialize)) {
+ isolate, buffer, byte_length, false)) {
return isolate->Throw(*isolate->factory()->
NewRangeError("invalid_array_buffer_length",
HandleVector<Object>(NULL, 0)));
@@ -1578,24 +1599,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
// We don't expect access checks to be needed on JSProxy objects.
ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
do {
if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(JSObject::cast(obj),
- isolate->heap()->proto_string(),
- v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET);
+ !isolate->MayNamedAccessWrapper(Handle<JSObject>::cast(obj),
+ isolate->factory()->proto_string(),
+ v8::ACCESS_GET)) {
+ isolate->ReportFailedAccessCheck(JSObject::cast(*obj), v8::ACCESS_GET);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
- obj = obj->GetPrototype(isolate);
+ obj = handle(obj->GetPrototype(isolate), isolate);
} while (obj->IsJSObject() &&
- JSObject::cast(obj)->map()->is_hidden_prototype());
- return obj;
+ JSObject::cast(*obj)->map()->is_hidden_prototype());
+ return *obj;
}
@@ -1654,6 +1675,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
static bool CheckAccessException(Object* callback,
v8::AccessType access_type) {
+ DisallowHeapAllocation no_gc;
if (callback->IsAccessorInfo()) {
AccessorInfo* info = AccessorInfo::cast(callback);
return
@@ -1676,20 +1698,20 @@ static bool CheckAccessException(Object* callback,
template<class Key>
static bool CheckGenericAccess(
- JSObject* receiver,
- JSObject* holder,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
Key key,
v8::AccessType access_type,
- bool (Isolate::*mayAccess)(JSObject*, Key, v8::AccessType)) {
+ bool (Isolate::*mayAccess)(Handle<JSObject>, Key, v8::AccessType)) {
Isolate* isolate = receiver->GetIsolate();
- for (JSObject* current = receiver;
+ for (Handle<JSObject> current = receiver;
true;
- current = JSObject::cast(current->GetPrototype())) {
+ current = handle(JSObject::cast(current->GetPrototype()), isolate)) {
if (current->IsAccessCheckNeeded() &&
!(isolate->*mayAccess)(current, key, access_type)) {
return false;
}
- if (current == holder) break;
+ if (current.is_identical_to(holder)) break;
}
return true;
}
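One detail of this hunk worth spelling out: the old code compared raw object pointers (current == holder), while the new code compares handles with is_identical_to(), which dereferences both handle slots. A toy sketch (types assumed, not V8's) of the distinction:

    #include <cassert>

    // A handle is a slot holding an object pointer; a moving GC may rewrite
    // the slot contents, but aliasing slots are rewritten consistently, so
    // identity must be checked on the referents, not the slot addresses.
    template <typename T>
    class SketchHandle {
     public:
      explicit SketchHandle(T** slot) : slot_(slot) {}
      T* operator*() const { return *slot_; }
      bool is_identical_to(const SketchHandle<T>& other) const {
        return *slot_ == *other.slot_;  // Compare referents.
      }
     private:
      T** slot_;
    };

    int main() {
      int object = 42;
      int* slot_a = &object;
      int* slot_b = &object;
      SketchHandle<int> a(&slot_a), b(&slot_b);
      assert(&slot_a != &slot_b);    // Distinct handle slots...
      assert(a.is_identical_to(b));  // ...same underlying object.
      return 0;
    }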
@@ -1702,28 +1724,29 @@ enum AccessCheckResult {
};
-static AccessCheckResult CheckPropertyAccess(
- JSObject* obj,
- Name* name,
- v8::AccessType access_type) {
+static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj,
+ Handle<Name> name,
+ v8::AccessType access_type) {
uint32_t index;
if (name->AsArrayIndex(&index)) {
// TODO(1095): we should traverse hidden prototype hierarchy as well.
if (CheckGenericAccess(
- obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
+ obj, obj, index, access_type, &Isolate::MayIndexedAccessWrapper)) {
return ACCESS_ALLOWED;
}
- obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
+ obj->GetIsolate()->ReportFailedAccessCheck(*obj, access_type);
return ACCESS_FORBIDDEN;
}
- LookupResult lookup(obj->GetIsolate());
- obj->LocalLookup(name, &lookup, true);
+ Isolate* isolate = obj->GetIsolate();
+ LookupResult lookup(isolate);
+ obj->LocalLookup(*name, &lookup, true);
if (!lookup.IsProperty()) return ACCESS_ABSENT;
- if (CheckGenericAccess<Object*>(
- obj, lookup.holder(), name, access_type, &Isolate::MayNamedAccess)) {
+ Handle<JSObject> holder(lookup.holder(), isolate);
+ if (CheckGenericAccess<Handle<Object> >(
+ obj, holder, name, access_type, &Isolate::MayNamedAccessWrapper)) {
return ACCESS_ALLOWED;
}
@@ -1740,7 +1763,7 @@ static AccessCheckResult CheckPropertyAccess(
case INTERCEPTOR:
// If the object has an interceptor, try real named properties.
// Overwrite the result to fetch the correct property later.
- lookup.holder()->LookupRealNamedProperty(name, &lookup);
+ holder->LookupRealNamedProperty(*name, &lookup);
if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) {
if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
return ACCESS_ALLOWED;
@@ -1751,7 +1774,7 @@ static AccessCheckResult CheckPropertyAccess(
break;
}
- obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
+ isolate->ReportFailedAccessCheck(*obj, access_type);
return ACCESS_FORBIDDEN;
}
@@ -1769,30 +1792,30 @@ enum PropertyDescriptorIndices {
};
-static MaybeObject* GetOwnProperty(Isolate* isolate,
- Handle<JSObject> obj,
- Handle<Name> name) {
+static Handle<Object> GetOwnProperty(Isolate* isolate,
+ Handle<JSObject> obj,
+ Handle<Name> name) {
Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
// Due to some WebKit tests, we want to make sure that we do not log
// more than one access failure here.
AccessCheckResult access_check_result =
- CheckPropertyAccess(*obj, *name, v8::ACCESS_HAS);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ CheckPropertyAccess(obj, name, v8::ACCESS_HAS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
switch (access_check_result) {
- case ACCESS_FORBIDDEN: return heap->false_value();
+ case ACCESS_FORBIDDEN: return factory->false_value();
case ACCESS_ALLOWED: break;
- case ACCESS_ABSENT: return heap->undefined_value();
+ case ACCESS_ABSENT: return factory->undefined_value();
}
PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
if (attrs == ABSENT) {
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return heap->undefined_value();
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return factory->undefined_value();
}
ASSERT(!isolate->has_scheduled_exception());
AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name);
Handle<AccessorPair> accessors(raw_accessors, isolate);
-
Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
@@ -1802,28 +1825,30 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
// GetProperty does access check.
Handle<Object> value = GetProperty(isolate, obj, name);
- RETURN_IF_EMPTY_HANDLE(isolate, value);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<Object>::null());
elms->set(VALUE_INDEX, *value);
} else {
// Access checks are performed for both accessors separately.
// When they fail, the respective field is not set in the descriptor.
- Object* getter = accessors->GetComponent(ACCESSOR_GETTER);
- Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
- if (!getter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_GET)) {
+ Handle<Object> getter(accessors->GetComponent(ACCESSOR_GETTER), isolate);
+ Handle<Object> setter(accessors->GetComponent(ACCESSOR_SETTER), isolate);
+
+ if (!getter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_GET)) {
ASSERT(!isolate->has_scheduled_exception());
- elms->set(GETTER_INDEX, getter);
+ elms->set(GETTER_INDEX, *getter);
} else {
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
- if (!setter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_SET)) {
+
+ if (!setter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_SET)) {
ASSERT(!isolate->has_scheduled_exception());
- elms->set(SETTER_INDEX, setter);
+ elms->set(SETTER_INDEX, *setter);
} else {
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
}
- return *isolate->factory()->NewJSArrayWithElements(elms);
+ return isolate->factory()->NewJSArrayWithElements(elms);
}
@@ -1839,15 +1864,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- return GetOwnProperty(isolate, obj, name);
+ Handle<Object> result = GetOwnProperty(isolate, obj, name);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- return obj->PreventExtensions();
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ Handle<Object> result = JSObject::PreventExtensions(obj);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -1871,8 +1900,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
- Handle<Object> result =
- RegExpImpl::Compile(re, pattern, flags);
+ Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -2164,7 +2192,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
// Declare the property by setting it to the initial value if provided,
// or undefined, and use the correct mode (e.g. READ_ONLY attribute for
// constant declarations).
- ASSERT(!object->HasLocalProperty(*name));
+ ASSERT(!JSReceiver::HasLocalProperty(object, name));
Handle<Object> value(isolate->heap()->undefined_value(), isolate);
if (*initial_value != NULL) value = initial_value;
// Declaring a const context slot is a conflicting declaration if
@@ -2196,7 +2224,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
// args[0] == name
// args[1] == language_mode
// args[2] == value (optional)
@@ -2207,7 +2235,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
bool assign = args.length() == 3;
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- GlobalObject* global = isolate->context()->global_object();
RUNTIME_ASSERT(args[1]->IsSmi());
CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
@@ -2224,28 +2251,33 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
// to assign to the property.
// Note that objects can have hidden prototypes, so we need to traverse
// the whole chain of hidden prototypes to do a 'local' lookup.
- Object* object = global;
LookupResult lookup(isolate);
- JSObject::cast(object)->LocalLookup(*name, &lookup, true);
+ isolate->context()->global_object()->LocalLookup(*name, &lookup, true);
if (lookup.IsInterceptor()) {
- HandleScope handle_scope(isolate);
PropertyAttributes intercepted =
lookup.holder()->GetPropertyAttribute(*name);
if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
// Found an interceptor that's not read only.
if (assign) {
- return lookup.holder()->SetProperty(
- &lookup, *name, args[2], attributes, strict_mode_flag);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<Object> result = JSObject::SetPropertyForResult(
+ handle(lookup.holder()), &lookup, name, value, attributes,
+ strict_mode_flag);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
} else {
return isolate->heap()->undefined_value();
}
}
}
- // Reload global in case the loop above performed a GC.
- global = isolate->context()->global_object();
if (assign) {
- return global->SetProperty(*name, args[2], attributes, strict_mode_flag);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<GlobalObject> global(isolate->context()->global_object());
+ Handle<Object> result = JSReceiver::SetProperty(
+ global, name, value, attributes, strict_mode_flag);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
return isolate->heap()->undefined_value();
}
@@ -2901,19 +2933,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
source_shared->set_dont_flush(true);
// Set the code, scope info, formal parameter count, and the length
- // of the target shared function info. Set the source code of the
- // target function to undefined. SetCode is only used for built-in
- // constructors like String, Array, and Object, and some web code
- // doesn't like seeing source code for constructors.
+ // of the target shared function info.
target_shared->ReplaceCode(source_shared->code());
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_length(source_shared->length());
target_shared->set_formal_parameter_count(
source_shared->formal_parameter_count());
- target_shared->set_script(isolate->heap()->undefined_value());
-
- // Since we don't store the source we should never optimize this.
- target_shared->code()->set_optimizable(false);
+ target_shared->set_script(source_shared->script());
+ target_shared->set_start_position_and_type(
+ source_shared->start_position_and_type());
+ target_shared->set_end_position(source_shared->end_position());
+ bool was_native = target_shared->native();
+ target_shared->set_compiler_hints(source_shared->compiler_hints());
+ target_shared->set_native(was_native);
// Set the code of the target function.
target->ReplaceCode(source_shared->code());
@@ -2945,10 +2977,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
CONVERT_SMI_ARG_CHECKED(num, 1);
RUNTIME_ASSERT(num >= 0);
- SetExpectedNofProperties(function, num);
+ // If objects constructed from this function exist then changing
+ // 'estimated_nof_properties' is dangerous since the previous value might
+ // have been compiled into the fast construct stub. Moreover, the inobject
+ // slack tracking logic might have adjusted the previous value, so even
+ // passing the same value is risky.
+ if (!func->shared()->live_objects_may_exist()) {
+ func->shared()->set_expected_nof_properties(num);
+ if (func->has_initial_map()) {
+ Handle<Map> new_initial_map =
+ func->GetIsolate()->factory()->CopyMap(
+ Handle<Map>(func->initial_map()));
+ new_initial_map->set_unused_property_fields(num);
+ func->set_initial_map(*new_initial_map);
+ }
+ }
return isolate->heap()->undefined_value();
}
@@ -3090,10 +3136,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowGeneratorStateError) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectFreeze) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- return object->Freeze(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ Handle<Object> result = JSObject::Freeze(object);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -4778,7 +4826,7 @@ MaybeObject* Runtime::HasObjectProperty(Isolate* isolate,
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
- return isolate->heap()->ToBoolean(object->HasElement(index));
+ return isolate->heap()->ToBoolean(JSReceiver::HasElement(object, index));
}
// Convert the key to a name - possibly by calling back into JavaScript.
@@ -4793,7 +4841,7 @@ MaybeObject* Runtime::HasObjectProperty(Isolate* isolate,
name = Handle<Name>::cast(converted);
}
- return isolate->heap()->ToBoolean(object->HasProperty(*name));
+ return isolate->heap()->ToBoolean(JSReceiver::HasProperty(object, name));
}
MaybeObject* Runtime::GetObjectPropertyOrFail(
@@ -5028,11 +5076,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
// TODO(mstarzinger): So far this only works if property attributes don't
// change, this should be fixed once we cleanup the underlying code.
if (callback->IsForeign() && result.GetAttributes() == attr) {
- return js_object->SetPropertyWithCallback(callback,
- *name,
- *obj_value,
- result.holder(),
- kStrictMode);
+ Handle<Object> result_object =
+ JSObject::SetPropertyWithCallback(js_object,
+ handle(callback, isolate),
+ name,
+ obj_value,
+ handle(result.holder()),
+ kStrictMode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result_object);
+ return *result_object;
}
}
@@ -5128,11 +5180,14 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
if (object->IsJSProxy()) {
bool has_pending_exception = false;
- Handle<Object> name = key->IsSymbol()
+ Handle<Object> name_object = key->IsSymbol()
? key : Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
- return JSProxy::cast(*object)->SetProperty(
- Name::cast(*name), *value, attr, strict_mode);
+ Handle<Name> name = Handle<Name>::cast(name_object);
+ Handle<Object> result = JSReceiver::SetProperty(
+ Handle<JSProxy>::cast(object), name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
// If the object isn't a JavaScript object, we ignore the store.
@@ -5172,7 +5227,6 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
}
if (key->IsName()) {
- MaybeObject* result;
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
if (js_object->HasExternalArrayElements()) {
@@ -5184,13 +5238,15 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
value = number;
}
}
- result = js_object->SetElement(
+ MaybeObject* result = js_object->SetElement(
index, *value, attr, strict_mode, true, set_mode);
+ if (result->IsFailure()) return result;
} else {
if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- result = js_object->SetProperty(*name, *value, attr, strict_mode);
+ Handle<Object> result =
+ JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
}
- if (result->IsFailure()) return result;
return *value;
}
@@ -5205,7 +5261,10 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
return js_object->SetElement(
index, *value, attr, strict_mode, true, set_mode);
} else {
- return js_object->SetProperty(*name, *value, attr, strict_mode);
+ Handle<Object> result =
+ JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
@@ -5504,7 +5563,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> key) {
- if (object->HasLocalProperty(*key)) return isolate->heap()->true_value();
+ if (JSReceiver::HasLocalProperty(object, key)) {
+ return isolate->heap()->true_value();
+ }
// Handle hidden prototypes. If there's a hidden prototype above this thing
// then we have to check it for properties, because they are supposed to
// look like they are on this object.
@@ -5521,40 +5582,39 @@ static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+ Handle<Object> object = args.at<Object>(0);
uint32_t index;
const bool key_is_array_index = key->AsArrayIndex(&index);
- Object* obj = args[0];
// Only JS objects can have properties.
- if (obj->IsJSObject()) {
- JSObject* object = JSObject::cast(obj);
+ if (object->IsJSObject()) {
+ Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
// Fast case: either the key is a real named property or it is not
// an array index and there are no interceptors or hidden
// prototypes.
- if (object->HasRealNamedProperty(isolate, key)) {
+ if (JSObject::HasRealNamedProperty(js_obj, key)) {
ASSERT(!isolate->has_scheduled_exception());
return isolate->heap()->true_value();
} else {
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
}
- Map* map = object->map();
+ Map* map = js_obj->map();
if (!key_is_array_index &&
!map->has_named_interceptor() &&
!HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
return isolate->heap()->false_value();
}
// Slow case.
- HandleScope scope(isolate);
return HasLocalPropertyImplementation(isolate,
- Handle<JSObject>(object),
+ Handle<JSObject>(js_obj),
Handle<Name>(key));
- } else if (obj->IsString() && key_is_array_index) {
+ } else if (object->IsString() && key_is_array_index) {
// Well, there is one exception: Handle [] on strings.
- String* string = String::cast(obj);
+ Handle<String> string = Handle<String>::cast(object);
if (index < static_cast<uint32_t>(string->length())) {
return isolate->heap()->true_value();
}
@@ -5564,12 +5624,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- bool result = receiver->HasProperty(key);
+ bool result = JSReceiver::HasProperty(receiver, key);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (isolate->has_pending_exception()) return Failure::Exception();
return isolate->heap()->ToBoolean(result);
@@ -5577,12 +5637,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
- bool result = receiver->HasElement(index);
+ bool result = JSReceiver::HasElement(receiver, index);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (isolate->has_pending_exception()) return Failure::Exception();
return isolate->heap()->ToBoolean(result);
@@ -5923,12 +5983,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- Object* object = args[0];
- return (object->IsJSObject() && !object->IsGlobalObject())
- ? JSObject::cast(object)->TransformToFastProperties(0)
- : object;
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ if (object->IsJSObject() && !object->IsGlobalObject()) {
+ JSObject::TransformToFastProperties(Handle<JSObject>::cast(object), 0);
+ }
+ return *object;
}
@@ -7945,21 +8006,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
// Allocate the elements if needed.
if (length > 0) {
// Allocate the fixed array.
- Object* obj;
- { MaybeObject* maybe_obj = isolate->heap()->AllocateRawFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ FixedArray* array;
+ { MaybeObject* maybe_obj =
+ isolate->heap()->AllocateUninitializedFixedArray(length);
+ if (!maybe_obj->To(&array)) return maybe_obj;
}
DisallowHeapAllocation no_gc;
- FixedArray* array = reinterpret_cast<FixedArray*>(obj);
- array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
- array->set_length(length);
-
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
array->set(i, *--parameters, mode);
}
- JSObject::cast(result)->set_elements(FixedArray::cast(obj));
+ JSObject::cast(result)->set_elements(array);
}
return result;
}
@@ -8288,7 +8346,7 @@ bool AllowOptimization(Isolate* isolate, Handle<JSFunction> function) {
// If the function is not optimizable or the debugger is active, continue
// using the code from the full compiler.
- if (!FLAG_crankshaft ||
+ if (!isolate->use_crankshaft() ||
function->shared()->optimization_disabled() ||
isolate->DebuggerHasBreakPoints()) {
if (FLAG_trace_opt) {
@@ -8436,14 +8494,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
- SealHandleScope shs(isolate);
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- delete deoptimizer;
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8501,8 +8551,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
// Start patching from the currently patched loop nesting level.
int current_level = unoptimized->allow_osr_at_loop_nesting_level();
- ASSERT(Deoptimizer::VerifyInterruptCode(
- isolate, unoptimized, current_level));
+ ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level));
for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
unoptimized->set_allow_osr_at_loop_nesting_level(i);
isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
@@ -8560,6 +8609,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_UnblockConcurrentRecompilation) {
+ RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
+ isolate->optimizing_compiler_thread()->Unblock();
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8572,7 +8628,7 @@ static bool IsSuitableForOnStackReplacement(Isolate* isolate,
Handle<JSFunction> function,
Handle<Code> unoptimized) {
// Keep track of whether we've succeeded in optimizing.
- if (!unoptimized->optimizable()) return false;
+ if (!isolate->use_crankshaft() || !unoptimized->optimizable()) return false;
// If we are trying to do OSR when there are already optimized
// activations of the function, it means (a) the function is directly or
// indirectly recursive and (b) an optimized invocation has been
@@ -8611,7 +8667,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
Handle<Code> result = Handle<Code>::null();
BailoutId ast_id = BailoutId::None();
- if (FLAG_concurrent_recompilation && FLAG_concurrent_osr) {
+ if (FLAG_concurrent_osr) {
if (isolate->optimizing_compiler_thread()->
IsQueuedForOSR(function, pc_offset)) {
// Still waiting for the optimizing compiler thread to finish. Carry on.
@@ -8623,25 +8679,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
return NULL;
}
- OptimizingCompiler* compiler = isolate->optimizing_compiler_thread()->
+ RecompileJob* job = isolate->optimizing_compiler_thread()->
FindReadyOSRCandidate(function, pc_offset);
- if (compiler == NULL) {
+ if (job == NULL) {
if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
Compiler::RecompileConcurrent(function, pc_offset)) {
if (function->IsMarkedForLazyRecompilation() ||
function->IsMarkedForConcurrentRecompilation()) {
// Prevent regular recompilation if we queue this for OSR.
// TODO(yangguo): remove this as soon as OSR becomes one-shot.
- function->ReplaceCode(function->shared()->code());
+ function->ReplaceCode(*unoptimized);
}
return NULL;
}
// Fall through to the end in case of failure.
} else {
// TODO(titzer): don't install the OSR code into the function.
- ast_id = compiler->info()->osr_ast_id();
- result = Compiler::InstallOptimizedCode(compiler);
+ ast_id = job->info()->osr_ast_id();
+ result = Compiler::InstallOptimizedCode(job);
}
} else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
@@ -8655,8 +8711,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
}
- // Revert the patched interrupt now, regardless of whether OSR succeeds.
- Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
+ // Revert the patched back edge table, regardless of whether OSR succeeds.
+ BackEdgeTable::Revert(isolate, *unoptimized);
// Check whether we ended up with usable optimized code.
if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
@@ -9193,7 +9249,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
// property from it.
if (!holder.is_null()) {
Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
- ASSERT(object->IsJSProxy() || object->HasProperty(*name));
+ ASSERT(object->IsJSProxy() || JSReceiver::HasProperty(object, name));
// GetProperty below can cause GC.
Handle<Object> receiver_handle(
object->IsGlobalObject()
@@ -10174,7 +10230,7 @@ static bool IterateElements(Isolate* isolate,
Handle<Object> element_value(elements->get(j), isolate);
if (!element_value->IsTheHole()) {
visitor->visit(j, element_value);
- } else if (receiver->HasElement(j)) {
+ } else if (JSReceiver::HasElement(receiver, j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
element_value = Object::GetElement(isolate, receiver, j);
@@ -10199,7 +10255,7 @@ static bool IterateElements(Isolate* isolate,
Handle<Object> element_value =
isolate->factory()->NewNumber(double_value);
visitor->visit(j, element_value);
- } else if (receiver->HasElement(j)) {
+ } else if (JSReceiver::HasElement(receiver, j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
Handle<Object> element_value =
@@ -10492,11 +10548,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
// property.
// Returns the number of non-undefined elements collected.
RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
- return object->PrepareElementsForSort(limit);
+ return *JSObject::PrepareElementsForSort(object, limit);
}
@@ -10587,14 +10643,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_SMI_ARG_CHECKED(flag, 2);
AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER;
if (!receiver->IsJSObject()) return isolate->heap()->undefined_value();
- return JSObject::cast(receiver)->LookupAccessor(name, component);
+ Handle<Object> result =
+ JSObject::GetAccessor(Handle<JSObject>::cast(receiver), name, component);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -10674,19 +10733,20 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
case CALLBACKS: {
Object* structure = result->GetCallbackObject();
if (structure->IsForeign() || structure->IsAccessorInfo()) {
- MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
- receiver, structure, name);
- if (!maybe_value->ToObject(&value)) {
- if (maybe_value->IsRetryAfterGC()) return maybe_value;
- ASSERT(maybe_value->IsException());
- maybe_value = heap->isolate()->pending_exception();
+ Isolate* isolate = heap->isolate();
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithCallback(
+ handle(result->holder(), isolate),
+ handle(receiver, isolate),
+ handle(structure, isolate),
+ handle(name, isolate));
+ if (value.is_null()) {
+ MaybeObject* exception = heap->isolate()->pending_exception();
heap->isolate()->clear_pending_exception();
- if (caught_exception != NULL) {
- *caught_exception = true;
- }
- return maybe_value;
+ if (caught_exception != NULL) *caught_exception = true;
+ return exception;
}
- return value;
+ return *value;
} else {
return heap->undefined_value();
}
@@ -10874,7 +10934,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
PropertyAttributes attributes;
- return obj->GetPropertyWithInterceptor(*obj, *name, &attributes);
+ Handle<Object> result =
+ JSObject::GetPropertyWithInterceptor(obj, obj, name, &attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -11391,8 +11454,8 @@ static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->declaration_context());
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, function_context, target)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, function_context, target)) {
return Handle<JSObject>();
}
@@ -11515,7 +11578,7 @@ static bool SetLocalVariableValue(Isolate* isolate,
!function_context->IsNativeContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- if (ext->HasProperty(*variable_name)) {
+ if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing
// property value.
SetProperty(isolate,
@@ -11549,8 +11612,8 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals to the context extension.
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, context, closure_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, context, closure_scope)) {
return Handle<JSObject>();
}
@@ -11603,7 +11666,7 @@ static bool SetClosureVariableValue(Isolate* isolate,
// be variables introduced by eval.
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
- if (ext->HasProperty(*variable_name)) {
+ if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing property value.
SetProperty(isolate,
ext,
@@ -11670,8 +11733,8 @@ static Handle<JSObject> MaterializeBlockScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, context, block_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, context, block_scope)) {
return Handle<JSObject>();
}
@@ -11693,8 +11756,8 @@ static Handle<JSObject> MaterializeModuleScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, context, module_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, context, module_scope)) {
return Handle<JSObject>();
}
@@ -12646,7 +12709,8 @@ static Handle<JSObject> MaterializeArgumentsObject(
// Do not materialize the arguments object for eval or top-level code.
// Skip if "arguments" is already taken.
if (!function->shared()->is_function() ||
- target->HasLocalProperty(isolate->heap()->arguments_string())) {
+ JSReceiver::HasLocalProperty(target,
+ isolate->factory()->arguments_string())) {
return target;
}
@@ -14533,22 +14597,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
if (obj->IsJSGlobalProxy()) {
Object* proto = obj->GetPrototype();
if (proto->IsNull()) return isolate->heap()->undefined_value();
ASSERT(proto->IsJSGlobalObject());
- obj = JSReceiver::cast(proto);
+ obj = handle(JSReceiver::cast(proto));
}
if (obj->IsJSProxy())
return isolate->heap()->undefined_value();
ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() &&
- JSObject::cast(obj)->HasFastElements()));
+ Handle<JSObject>::cast(obj)->HasFastElements()));
ASSERT(obj->IsJSObject());
- return JSObject::cast(obj)->SetObserved(isolate);
+ JSObject::SetObserved(Handle<JSObject>::cast(obj));
+ return isolate->heap()->undefined_value();
}
@@ -14652,7 +14717,7 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
Handle<Cell> cell = Handle<Cell>::cast(type_info);
Handle<AllocationSite> site = Handle<AllocationSite>(
AllocationSite::cast(cell->value()), isolate);
- ASSERT(!site->IsLiteralSite());
+ ASSERT(!site->SitePointsToLiteral());
ElementsKind to_kind = site->GetElementsKind();
if (holey && !IsFastHoleyElementsKind(to_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
@@ -14786,8 +14851,7 @@ const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
}
-void Runtime::PerformGC(Object* result) {
- Isolate* isolate = Isolate::Current();
+void Runtime::PerformGC(Object* result, Isolate* isolate) {
Failure* failure = Failure::cast(result);
if (failure->IsRetryAfterGC()) {
if (isolate->heap()->new_space()->AddFreshPage()) {
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 60c6677116..1b7e32e7a1 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -91,7 +91,6 @@ namespace internal {
F(TryInstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyStubFailure, 0, 1) \
- F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
@@ -100,6 +99,7 @@ namespace internal {
F(NeverOptimizeFunction, 1, 1) \
F(GetOptimizationStatus, -1, 1) \
F(GetOptimizationCount, 1, 1) \
+ F(UnblockConcurrentRecompilation, 0, 1) \
F(CompileForOnStackReplacement, 2, 1) \
F(SetAllocationTimeout, 2, 1) \
F(AllocateInNewSpace, 1, 1) \
@@ -299,7 +299,6 @@ namespace internal {
/* Literals */ \
F(MaterializeRegExpLiteral, 4, 1)\
F(CreateObjectLiteral, 4, 1) \
- F(CreateObjectLiteralShallow, 4, 1) \
F(CreateArrayLiteral, 3, 1) \
F(CreateArrayLiteralShallow, 3, 1) \
\
@@ -364,6 +363,7 @@ namespace internal {
F(ArrayBufferInitialize, 2, 1)\
F(ArrayBufferGetByteLength, 1, 1)\
F(ArrayBufferSliceImpl, 3, 1) \
+ F(ArrayBufferIsView, 1, 1) \
\
F(TypedArrayInitialize, 5, 1) \
F(TypedArrayInitializeFromArrayLike, 4, 1) \
@@ -838,7 +838,7 @@ class Runtime : public AllStatic {
JSArrayBuffer* phantom_array_buffer);
// Helper functions used stubs.
- static void PerformGC(Object* result);
+ static void PerformGC(Object* result, Isolate* isolate);
// Used in runtime.cc and hydrogen's VisitArrayLiteral.
static Handle<Object> CreateArrayLiteralBoilerplate(
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 5339570ef6..ce11c37079 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -526,8 +526,8 @@ function ToNumber(x) {
: %StringToNumber(x);
}
if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return $NaN;
- if (IS_SYMBOL(x)) return $NaN;
+ if (IS_UNDEFINED(x)) return NAN;
+ if (IS_SYMBOL(x)) return NAN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
}
@@ -537,8 +537,8 @@ function NonNumberToNumber(x) {
: %StringToNumber(x);
}
if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return $NaN;
- if (IS_SYMBOL(x)) return $NaN;
+ if (IS_UNDEFINED(x)) return NAN;
+ if (IS_SYMBOL(x)) return NAN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
}
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index 0aaa1e9b77..684ef486c7 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -216,11 +216,7 @@ class Sampler::PlatformData : public PlatformDataCommon {
class SimulatorHelper {
public:
inline bool Init(Sampler* sampler, Isolate* isolate) {
- ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
- Isolate::PerIsolateThreadData* per_thread_data = isolate->
- FindPerThreadDataForThread(thread_id);
- if (!per_thread_data) return false;
- simulator_ = per_thread_data->simulator();
+ simulator_ = isolate->thread_local_top()->simulator_;
// Check if there is an active simulator.
return simulator_ != NULL;
}
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 8b7cb569bd..26f840b23a 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -27,10 +27,14 @@
// Features shared by parsing and pre-parsing scanners.
+#include <cmath>
+
#include "scanner.h"
#include "../include/v8stdint.h"
#include "char-predicates-inl.h"
+#include "conversions-inl.h"
+#include "list-inl.h"
namespace v8 {
namespace internal {
@@ -1108,4 +1112,140 @@ bool Scanner::ScanRegExpFlags() {
return true;
}
+
+int DuplicateFinder::AddAsciiSymbol(Vector<const char> key, int value) {
+ return AddSymbol(Vector<const byte>::cast(key), true, value);
+}
+
+
+int DuplicateFinder::AddUtf16Symbol(Vector<const uint16_t> key, int value) {
+ return AddSymbol(Vector<const byte>::cast(key), false, value);
+}
+
+
+int DuplicateFinder::AddSymbol(Vector<const byte> key,
+ bool is_ascii,
+ int value) {
+ uint32_t hash = Hash(key, is_ascii);
+ byte* encoding = BackupKey(key, is_ascii);
+ HashMap::Entry* entry = map_.Lookup(encoding, hash, true);
+ int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ entry->value =
+ reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
+ return old_value;
+}
+
+
+int DuplicateFinder::AddNumber(Vector<const char> key, int value) {
+ ASSERT(key.length() > 0);
+ // Quick check for already being in canonical form.
+ if (IsNumberCanonical(key)) {
+ return AddAsciiSymbol(key, value);
+ }
+
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
+ double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
+ int length;
+ const char* string;
+ if (!std::isfinite(double_value)) {
+ string = "Infinity";
+ length = 8; // strlen("Infinity");
+ } else {
+ string = DoubleToCString(double_value,
+ Vector<char>(number_buffer_, kBufferSize));
+ length = StrLength(string);
+ }
+ return AddSymbol(Vector<const byte>(reinterpret_cast<const byte*>(string),
+ length), true, value);
+}
+
+
+bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
+ // Test for a safe approximation of number literals that are already
+ // in canonical form: max 15 digits, no leading zeroes, except an
+ // integer part that is a single zero, and no trailing zeros below
+ // the decimal point.
+ int pos = 0;
+ int length = number.length();
+ if (number.length() > 15) return false;
+ if (number[pos] == '0') {
+ pos++;
+ } else {
+ while (pos < length &&
+ static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++;
+ }
+ if (length == pos) return true;
+ if (number[pos] != '.') return false;
+ pos++;
+ bool invalid_last_digit = true;
+ while (pos < length) {
+ byte digit = number[pos] - '0';
+ if (digit > '9' - '0') return false;
+ invalid_last_digit = (digit == 0);
+ pos++;
+ }
+ return !invalid_last_digit;
+}
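The canonical-form test above is easy to exercise in isolation. The following standalone copy (assumed faithful to the hunk, with V8's Vector replaced by a C string) shows which literals it accepts and rejects:

    #include <cassert>
    #include <cstring>

    static bool IsCanonical(const char* s) {
      int pos = 0;
      int length = static_cast<int>(strlen(s));
      if (length > 15) return false;  // Max 15 digits.
      if (s[pos] == '0') {
        pos++;  // A leading zero is only allowed as the whole integer part.
      } else {
        while (pos < length &&
               static_cast<unsigned>(s[pos] - '0') <= 9) pos++;
      }
      if (length == pos) return true;
      if (s[pos] != '.') return false;
      pos++;
      bool invalid_last_digit = true;
      while (pos < length) {
        unsigned digit = static_cast<unsigned>(s[pos] - '0');
        if (digit > 9) return false;
        invalid_last_digit = (digit == 0);
        pos++;
      }
      return !invalid_last_digit;  // No trailing zero after the point.
    }

    int main() {
      assert(IsCanonical("0"));       // Single-zero integer part.
      assert(IsCanonical("123.25"));  // No trailing zero after the point.
      assert(!IsCanonical("0.50"));   // Trailing zero below the decimal point.
      assert(!IsCanonical("00"));     // Zero followed by more digits.
      return 0;
    }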
+
+
+uint32_t DuplicateFinder::Hash(Vector<const byte> key, bool is_ascii) {
+ // Primitive hash function, almost identical to the one used
+ // for strings (except that it's seeded by the length and ASCII-ness).
+ int length = key.length();
+ uint32_t hash = (length << 1) | (is_ascii ? 1 : 0);
+ for (int i = 0; i < length; i++) {
+ uint32_t c = key[i];
+ hash = (hash + c) * 1025;
+ hash ^= (hash >> 6);
+ }
+ return hash;
+}
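For reference, the same hash in standalone form (assumed faithful; V8's Vector replaced by a pointer/length pair). The multiplier 1025 makes each step a cheap shift-add, since x * 1025 == x + (x << 10):

    #include <cassert>
    #include <cstdint>

    static uint32_t HashBytes(const uint8_t* key, int length, bool is_ascii) {
      uint32_t hash = (length << 1) | (is_ascii ? 1 : 0);  // Seed.
      for (int i = 0; i < length; i++) {
        hash = (hash + key[i]) * 1025;  // x * 1025 == x + (x << 10).
        hash ^= (hash >> 6);
      }
      return hash;
    }

    int main() {
      const uint8_t k[] = {'k', 'e', 'y'};
      // Same bytes and same ASCII-ness must hash equally for map lookups.
      assert(HashBytes(k, 3, true) == HashBytes(k, 3, true));
      return 0;
    }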
+
+
+bool DuplicateFinder::Match(void* first, void* second) {
+ // Decode lengths.
+ // Length + ASCII-bit is encoded as base 128, most significant heptet first,
+ // with the 8th bit being non-zero while there are more heptets.
+ // The value encodes the number of bytes following, and whether the original
+ // was ASCII.
+ byte* s1 = reinterpret_cast<byte*>(first);
+ byte* s2 = reinterpret_cast<byte*>(second);
+ uint32_t length_ascii_field = 0;
+ byte c1;
+ do {
+ c1 = *s1;
+ if (c1 != *s2) return false;
+ length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f);
+ s1++;
+ s2++;
+ } while ((c1 & 0x80) != 0);
+ int length = static_cast<int>(length_ascii_field >> 1);
+ return memcmp(s1, s2, length) == 0;
+}
+
+
+byte* DuplicateFinder::BackupKey(Vector<const byte> bytes,
+ bool is_ascii) {
+ uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0);
+ backing_store_.StartSequence();
+ // Emit ascii_length as base-128 encoded number, with the 7th bit set
+ // on the byte of every heptet except the last, least significant, one.
+ if (ascii_length >= (1 << 7)) {
+ if (ascii_length >= (1 << 14)) {
+ if (ascii_length >= (1 << 21)) {
+ if (ascii_length >= (1 << 28)) {
+ backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80));
+ }
+ backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u));
+ }
+ backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u));
+ }
+ backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u));
+ }
+ backing_store_.Add(static_cast<byte>(ascii_length & 0x7f));
+
+ backing_store_.AddBlock(bytes);
+ return backing_store_.EndSequence().start();
+}
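BackupKey() and Match() together define a small variable-length prefix format: (length << 1 | is_ascii) written base-128, most significant 7-bit group first, high bit set on every byte but the last. A self-contained sketch (assumed helper names, not V8 code) that round-trips one value:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static void EncodeLengthAscii(uint32_t length, bool is_ascii,
                                  std::vector<uint8_t>* out) {
      uint32_t v = (length << 1) | (is_ascii ? 1 : 0);
      // Most significant heptets first, continuation bit on each.
      for (int shift = 28; shift > 0; shift -= 7) {
        if (v >= (1u << shift)) out->push_back(((v >> shift) & 0x7f) | 0x80);
      }
      out->push_back(v & 0x7f);  // Last heptet: continuation bit clear.
    }

    static uint32_t DecodeLengthAscii(const uint8_t* p, bool* is_ascii) {
      uint32_t field = 0;
      uint8_t c;
      do {
        c = *p++;
        field = (field << 7) | (c & 0x7f);
      } while ((c & 0x80) != 0);  // Mirrors the loop in Match().
      *is_ascii = (field & 1) != 0;
      return field >> 1;
    }

    int main() {
      std::vector<uint8_t> buf;
      EncodeLengthAscii(300, true, &buf);
      bool is_ascii = false;
      assert(DecodeLengthAscii(buf.data(), &is_ascii) == 300);
      assert(is_ascii);
      return 0;
    }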
+
} } // namespace v8::internal
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index d7328085b7..3cefc833ac 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -34,6 +34,8 @@
#include "char-predicates.h"
#include "checks.h"
#include "globals.h"
+#include "hashmap.h"
+#include "list.h"
#include "token.h"
#include "unicode-inl.h"
#include "utils.h"
@@ -121,9 +123,10 @@ class Utf16CharacterStream {
};
-class UnicodeCache {
// ---------------------------------------------------------------------
// Caching predicates used by scanners.
+
+class UnicodeCache {
public:
UnicodeCache() {}
typedef unibrow::Utf8Decoder<512> Utf8Decoder;
@@ -148,6 +151,56 @@ class UnicodeCache {
};
+// ---------------------------------------------------------------------
+// DuplicateFinder discovers duplicate symbols.
+
+class DuplicateFinder {
+ public:
+ explicit DuplicateFinder(UnicodeCache* constants)
+ : unicode_constants_(constants),
+ backing_store_(16),
+ map_(&Match) { }
+
+ int AddAsciiSymbol(Vector<const char> key, int value);
+ int AddUtf16Symbol(Vector<const uint16_t> key, int value);
+ // Add a number literal by converting it (if necessary)
+ // to the string that ToString(ToNumber(literal)) would generate,
+ // and then adding that string with AddAsciiSymbol.
+ // This string is the actual value used as key in an object literal,
+ // and the one that must be different from the other keys.
+ int AddNumber(Vector<const char> key, int value);
+
+ private:
+ int AddSymbol(Vector<const byte> key, bool is_ascii, int value);
+ // Backs up the key and its length in the backing store.
+ // The backup is stored with a base-128 encoding of the
+ // length (plus a bit saying whether the string is ASCII),
+ // followed by the bytes of the key.
+ byte* BackupKey(Vector<const byte> key, bool is_ascii);
+
+ // Compare two encoded keys (both pointing into the backing store)
+ // for having the same base-128 encoded lengths and ASCII-ness,
+ // and then having the same 'length' bytes following.
+ static bool Match(void* first, void* second);
+ // Creates a hash from a sequence of bytes.
+ static uint32_t Hash(Vector<const byte> key, bool is_ascii);
+ // Checks whether a string containing a JS number is its canonical
+ // form.
+ static bool IsNumberCanonical(Vector<const char> key);
+
+ // Size of buffer. Sufficient for using it to call DoubleToCString
+ // from conversions.h.
+ static const int kBufferSize = 100;
+
+ UnicodeCache* unicode_constants_;
+ // Backing store used to store strings used as hashmap keys.
+ SequenceCollector<unsigned char> backing_store_;
+ HashMap map_;
+ // Buffer used for string->number->canonical string conversions.
+ char number_buffer_[kBufferSize];
+};
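As a usage illustration, given the declarations above: the return value of AddAsciiSymbol() is the value previously stored for the key (0 if unseen), so a caller can OR together per-key flags and detect repeats. The flag values and call site here are hypothetical, not taken from V8:

    // Hypothetical flags; any disjoint bit assignment works.
    enum PropertyKind { kValueProperty = 1, kGetterProperty = 2 };

    bool IsDuplicateProperty(DuplicateFinder* finder,
                             Vector<const char> name,
                             PropertyKind kind) {
      int old_kinds = finder->AddAsciiSymbol(name, kind);
      return old_kinds != 0;  // Non-zero: the key was seen before.
    }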
+
+
// ----------------------------------------------------------------------------
// LiteralBuffer - Collector of chars of literals.
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index ba138f2add..f1ae876ca3 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -363,14 +363,14 @@ int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
}
-bool ScopeInfo::CopyContextLocalsToScopeObject(
- Isolate* isolate,
- Handle<Context> context,
- Handle<JSObject> scope_object) {
- int local_count = ContextLocalCount();
+bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object) {
+ Isolate* isolate = scope_info->GetIsolate();
+ int local_count = scope_info->ContextLocalCount();
if (local_count == 0) return true;
// Fill all context locals to the context extension.
- int start = ContextLocalNameEntriesIndex();
+ int start = scope_info->ContextLocalNameEntriesIndex();
int end = start + local_count;
for (int i = start; i < end; ++i) {
int context_index = Context::MIN_CONTEXT_SLOTS + i - start;
@@ -378,7 +378,7 @@ bool ScopeInfo::CopyContextLocalsToScopeObject(
isolate,
SetProperty(isolate,
scope_object,
- Handle<String>(String::cast(get(i))),
+ Handle<String>(String::cast(scope_info->get(i))),
Handle<Object>(context->get(context_index), isolate),
::NONE,
kNonStrictMode),
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index ce1741a623..ee327fb79f 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -437,8 +437,8 @@ Variable* Scope::LookupFunctionVar(Handle<String> name,
this, name, mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized);
VariableProxy* proxy = factory->NewVariableProxy(var);
- VariableDeclaration* declaration =
- factory->NewVariableDeclaration(proxy, mode, this);
+ VariableDeclaration* declaration = factory->NewVariableDeclaration(
+ proxy, mode, this, RelocInfo::kNoPosition);
DeclareFunctionVar(declaration);
var->AllocateTo(Variable::CONTEXT, index);
return var;
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index d05dd26122..7ed36665e2 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -532,55 +532,59 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
52,
"cpu_features");
- Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
- UNCLASSIFIED,
- 53,
- "Heap::NewSpaceAllocationTopAddress");
- Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
- UNCLASSIFIED,
- 54,
- "Heap::NewSpaceAllocationLimitAddress");
Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
UNCLASSIFIED,
- 55,
+ 53,
"Runtime::AllocateInNewSpace");
Add(ExternalReference::old_pointer_space_allocation_top_address(
isolate).address(),
UNCLASSIFIED,
- 56,
+ 54,
"Heap::OldPointerSpaceAllocationTopAddress");
Add(ExternalReference::old_pointer_space_allocation_limit_address(
isolate).address(),
UNCLASSIFIED,
- 57,
+ 55,
"Heap::OldPointerSpaceAllocationLimitAddress");
Add(ExternalReference(Runtime::kAllocateInOldPointerSpace, isolate).address(),
UNCLASSIFIED,
- 58,
+ 56,
"Runtime::AllocateInOldPointerSpace");
Add(ExternalReference::old_data_space_allocation_top_address(
isolate).address(),
UNCLASSIFIED,
- 59,
+ 57,
"Heap::OldDataSpaceAllocationTopAddress");
Add(ExternalReference::old_data_space_allocation_limit_address(
isolate).address(),
UNCLASSIFIED,
- 60,
+ 58,
"Heap::OldDataSpaceAllocationLimitAddress");
Add(ExternalReference(Runtime::kAllocateInOldDataSpace, isolate).address(),
UNCLASSIFIED,
- 61,
+ 59,
"Runtime::AllocateInOldDataSpace");
Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate).
address(),
UNCLASSIFIED,
- 62,
+ 60,
"Heap::NewSpaceAllocationLimitAddress");
Add(ExternalReference::allocation_sites_list_address(isolate).address(),
UNCLASSIFIED,
- 63,
+ 61,
"Heap::allocation_sites_list_address()");
+ Add(ExternalReference::record_object_allocation_function(isolate).address(),
+ UNCLASSIFIED,
+ 62,
+ "HeapProfiler::RecordObjectAllocationFromMasm");
+ Add(ExternalReference::address_of_uint32_bias().address(),
+ UNCLASSIFIED,
+ 63,
+ "uint32_bias");
+ Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
+ UNCLASSIFIED,
+ 64,
+ "Code::MarkCodeAsExecuted");
// Add a small set of deopt entry addresses to encoder without generating the
// deopt table code, which isn't possible at deserialization time.
@@ -835,6 +839,8 @@ void Deserializer::Deserialize(Isolate* isolate) {
isolate_->heap()->undefined_value());
}
+ isolate_->heap()->InitializeWeakObjectToCodeTable();
+
// Update data pointers to the external strings containing natives sources.
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
Object* source = isolate_->heap()->natives_source_cache()->get(i);
@@ -1284,7 +1290,6 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
root_index_wave_front_(0) {
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
- ASSERT(isolate_->IsDefaultIsolate());
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
@@ -1317,6 +1322,14 @@ void PartialSerializer::Serialize(Object** object) {
}
+bool Serializer::ShouldBeSkipped(Object** current) {
+ Object** roots = isolate()->heap()->roots_array_start();
+ return current == &roots[Heap::kStoreBufferTopRootIndex]
+ || current == &roots[Heap::kStackLimitRootIndex]
+ || current == &roots[Heap::kRealStackLimitRootIndex];
+}
+
+
void Serializer::VisitPointers(Object** start, Object** end) {
Isolate* isolate = this->isolate();
@@ -1325,8 +1338,7 @@ void Serializer::VisitPointers(Object** start, Object** end) {
root_index_wave_front_ =
Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
}
- if (reinterpret_cast<Address>(current) ==
- isolate->heap()->store_buffer()->TopAddress()) {
+ if (ShouldBeSkipped(current)) {
sink_->Put(kSkip, "Skip");
sink_->PutInt(kPointerSize, "SkipOneWord");
} else if ((*current)->IsSmi()) {
@@ -1666,19 +1678,15 @@ void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
}
-void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
- Address* end) {
- Address references_start = reinterpret_cast<Address>(start);
+void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
+ Address references_start = reinterpret_cast<Address>(p);
int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
- for (Address* current = start; current < end; current++) {
- sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
- sink_->PutInt(skip, "SkipB4ExternalRef");
- skip = 0;
- int reference_id = serializer_->EncodeExternalReference(*current);
- sink_->PutInt(reference_id, "reference id");
- }
- bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
+ sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
+ sink_->PutInt(skip, "SkipB4ExternalRef");
+ int reference_id = serializer_->EncodeExternalReference(*p);
+ sink_->PutInt(reference_id, "reference id");
+ bytes_processed_so_far_ += kPointerSize;
}
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 020a744fc0..47627ac2dd 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -339,10 +339,6 @@ class Deserializer: public SerializerDeserializer {
private:
virtual void VisitPointers(Object** start, Object** end);
- virtual void VisitExternalReferences(Address* start, Address* end) {
- UNREACHABLE();
- }
-
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
UNREACHABLE();
}
@@ -366,6 +362,10 @@ class Deserializer: public SerializerDeserializer {
Address Allocate(int space_index, int size) {
Address address = high_water_[space_index];
high_water_[space_index] = address + size;
+ HeapProfiler* profiler = isolate_->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(address, size);
+ }
return address;
}
@@ -517,7 +517,7 @@ class Serializer : public SerializerDeserializer {
void Serialize();
void VisitPointers(Object** start, Object** end);
void VisitEmbeddedPointer(RelocInfo* target);
- void VisitExternalReferences(Address* start, Address* end);
+ void VisitExternalReference(Address* p);
void VisitExternalReference(RelocInfo* rinfo);
void VisitCodeTarget(RelocInfo* target);
void VisitCodeEntry(Address entry_address);
@@ -569,6 +569,10 @@ class Serializer : public SerializerDeserializer {
int SpaceAreaSize(int space);
+ // Some roots should not be serialized, because their actual value depends on
+ // absolute addresses and they are reset after deserialization anyway.
+ bool ShouldBeSkipped(Object** current);
+
Isolate* isolate_;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references.
diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc
index 96034e352b..4bdf63cedd 100644
--- a/deps/v8/src/snapshot-common.cc
+++ b/deps/v8/src/snapshot-common.cc
@@ -102,10 +102,19 @@ bool Snapshot::Initialize(const char* snapshot_file) {
DeleteArray(str);
return success;
} else if (size_ > 0) {
+ ElapsedTimer timer;
+ if (FLAG_profile_deserialization) {
+ timer.Start();
+ }
SnapshotByteSource source(raw_data_, raw_size_);
Deserializer deserializer(&source);
ReserveSpaceForLinkedInSnapshot(&deserializer);
- return V8::Initialize(&deserializer);
+ bool success = V8::Initialize(&deserializer);
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
+ }
+ return success;
}
return false;
}
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index be2ae2a57d..d5c114c5b0 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -28,6 +28,7 @@
#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_
+#include "heap-profiler.h"
#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"
@@ -263,22 +264,28 @@ void Page::set_prev_page(Page* page) {
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top;
+ Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
- if (new_top > allocation_info_.limit) return NULL;
+ if (new_top > allocation_info_.limit()) return NULL;
- allocation_info_.top = new_top;
+ allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
}
// Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
+MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
+ AllocationType event) {
+ HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+
HeapObject* object = AllocateLinearly(size_in_bytes);
if (object != NULL) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
@@ -291,6 +298,9 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
@@ -299,6 +309,9 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
@@ -311,31 +324,36 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
+ Address old_top = allocation_info_.top();
#ifdef DEBUG
// If we are stressing compaction we waste some memory in new space
// in order to get more frequent GCs.
if (FLAG_stress_compaction && !heap()->linear_allocation()) {
- if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
+ if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
int filler_size = size_in_bytes * 4;
for (int i = 0; i < filler_size; i += kPointerSize) {
*(reinterpret_cast<Object**>(old_top + i)) =
heap()->one_pointer_filler_map();
}
old_top += filler_size;
- allocation_info_.top += filler_size;
+ allocation_info_.set_top(allocation_info_.top() + filler_size);
}
}
#endif
- if (allocation_info_.limit - old_top < size_in_bytes) {
+ if (allocation_info_.limit() - old_top < size_in_bytes) {
return SlowAllocateRaw(size_in_bytes);
}
- Object* obj = HeapObject::FromAddress(old_top);
- allocation_info_.top += size_in_bytes;
+ HeapObject* obj = HeapObject::FromAddress(old_top);
+ allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+ HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+ if (profiler != NULL && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(obj->address(), size_in_bytes);
+ }
+
return obj;
}
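
The spaces-inl.h hunks above thread an AllocationType through PagedSpace::AllocateRaw and notify the heap profiler after every successful bump-pointer allocation when allocation tracking is on; NewSpace::AllocateRaw gets the same hook. A simplified sketch of the pattern with stand-in types rather than V8's (the NEW_OBJECT/MOVE_OBJECT split keeps objects relocated by the collector from being recorded as fresh allocations):

```cpp
// Simplified sketch of bump-pointer allocation plus the optional profiler
// hook. All types here are stand-ins, not V8's.
#include <cstdint>

enum AllocationType { NEW_OBJECT, MOVE_OBJECT };

struct Profiler {
  bool tracking = false;
  bool is_tracking_allocations() const { return tracking; }
  void NewObjectEvent(uintptr_t /*address*/, int /*size*/) { /* record */ }
};

struct Space {
  uintptr_t top = 0;
  uintptr_t limit = 0;
  Profiler* profiler = nullptr;

  // Returns the new object's address, or 0 when the linear area is full.
  uintptr_t AllocateRaw(int size_in_bytes, AllocationType event = NEW_OBJECT) {
    uintptr_t current_top = top;
    if (limit - current_top < static_cast<uintptr_t>(size_in_bytes)) return 0;
    top = current_top + size_in_bytes;
    // Only genuinely new objects are reported; objects relocated during
    // evacuation pass MOVE_OBJECT and stay invisible to the tracker.
    if (event == NEW_OBJECT && profiler != nullptr &&
        profiler->is_tracking_allocations()) {
      profiler->NewObjectEvent(current_top, size_in_bytes);
    }
    return current_top;
  }
};
```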
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 2faf41912e..fe5eeb5e43 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -29,6 +29,7 @@
#include "macro-assembler.h"
#include "mark-compact.h"
+#include "msan.h"
#include "platform.h"
namespace v8 {
@@ -717,6 +718,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
executable,
owner);
result->set_reserved_memory(&reservation);
+ MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
return result;
}
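
The new MSAN_MEMORY_IS_INITIALIZED call tells MemorySanitizer that the freshly mapped chunk may be read without tripping use-of-uninitialized-value reports. msan.h itself is not shown in this diff, so the sketch below is only a guess at how such an annotation macro is typically defined: forward to the sanitizer hook under MSan, compile away otherwise.

```cpp
// Guess at the shape of the macro (msan.h is not part of this diff).
#if defined(__has_feature)
# if __has_feature(memory_sanitizer)
#  include <sanitizer/msan_interface.h>
#  define MSAN_MEMORY_IS_INITIALIZED(p, s) __msan_unpoison((p), (s))
# endif
#endif
#ifndef MSAN_MEMORY_IS_INITIALIZED
# define MSAN_MEMORY_IS_INITIALIZED(p, s) ((void) (p), (void) (s))
#endif
```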
@@ -958,8 +960,8 @@ PagedSpace::PagedSpace(Heap* heap,
* AreaSize();
accounting_stats_.Clear();
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
anchor_.InitializeAsAnchor(this);
}
@@ -988,7 +990,7 @@ void PagedSpace::TearDown() {
size_t PagedSpace::CommittedPhysicalMemory() {
if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = 0;
PageIterator it(this);
while (it.has_next()) {
@@ -1056,7 +1058,7 @@ intptr_t PagedSpace::SizeOfFirstPage() {
int size = 0;
switch (identity()) {
case OLD_POINTER_SPACE:
- size = 64 * kPointerSize * KB;
+ size = 72 * kPointerSize * KB;
break;
case OLD_DATA_SPACE:
size = 192 * KB;
@@ -1077,7 +1079,12 @@ intptr_t PagedSpace::SizeOfFirstPage() {
// upgraded to handle small pages.
size = AreaSize();
} else {
- size = 384 * KB;
+#if V8_TARGET_ARCH_MIPS
+ // TODO(plind): Investigate larger code stub sizes on MIPS.
+ size = 480 * KB;
+#else
+ size = 416 * KB;
+#endif
}
break;
default:
@@ -1135,8 +1142,9 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) {
DecreaseUnsweptFreeBytes(page);
}
- if (Page::FromAllocationTop(allocation_info_.top) == page) {
- allocation_info_.top = allocation_info_.limit = NULL;
+ if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
}
if (unlink) {
@@ -1163,12 +1171,12 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
if (was_swept_conservatively_) return;
bool allocation_pointer_found_in_space =
- (allocation_info_.top == allocation_info_.limit);
+ (allocation_info_.top() == allocation_info_.limit());
PageIterator page_iterator(this);
while (page_iterator.has_next()) {
Page* page = page_iterator.next();
CHECK(page->owner() == this);
- if (page == Page::FromAllocationTop(allocation_info_.top)) {
+ if (page == Page::FromAllocationTop(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
CHECK(page->WasSweptPrecisely());
@@ -1279,8 +1287,8 @@ void NewSpace::TearDown() {
}
start_ = NULL;
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
to_space_.TearDown();
from_space_.TearDown();
@@ -1337,22 +1345,22 @@ void NewSpace::Shrink() {
}
}
}
- allocation_info_.limit = to_space_.page_high();
+ allocation_info_.set_limit(to_space_.page_high());
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::UpdateAllocationInfo() {
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- allocation_info_.top = to_space_.page_low();
- allocation_info_.limit = to_space_.page_high();
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(to_space_.page_low());
+ allocation_info_.set_limit(to_space_.page_high());
// Lower limit during incremental marking.
if (heap()->incremental_marking()->IsMarking() &&
inline_allocation_limit_step() != 0) {
Address new_limit =
- allocation_info_.top + inline_allocation_limit_step();
- allocation_info_.limit = Min(new_limit, allocation_info_.limit);
+ allocation_info_.top() + inline_allocation_limit_step();
+ allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
}
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -1371,7 +1379,7 @@ void NewSpace::ResetAllocationInfo() {
bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top;
+ Address top = allocation_info_.top();
if (NewSpacePage::IsAtStart(top)) {
// The current page is already empty. Don't try to make another.
@@ -1403,15 +1411,16 @@ bool NewSpace::AddFreshPage() {
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
+ Address old_top = allocation_info_.top();
Address new_top = old_top + size_in_bytes;
Address high = to_space_.page_high();
- if (allocation_info_.limit < high) {
+ if (allocation_info_.limit() < high) {
// Incremental marking has lowered the limit to get a
// chance to do a step.
- allocation_info_.limit = Min(
- allocation_info_.limit + inline_allocation_limit_step_,
+ Address new_limit = Min(
+ allocation_info_.limit() + inline_allocation_limit_step_,
high);
+ allocation_info_.set_limit(new_limit);
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(
bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
@@ -1520,20 +1529,18 @@ void SemiSpace::TearDown() {
bool SemiSpace::Commit() {
ASSERT(!is_committed());
int pages = capacity_ / Page::kPageSize;
- Address end = start_ + maximum_capacity_;
- Address start = end - pages * Page::kPageSize;
- if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
capacity_,
executable())) {
return false;
}
- NewSpacePage* page = anchor();
- for (int i = 1; i <= pages; i++) {
+ NewSpacePage* current = anchor();
+ for (int i = 0; i < pages; i++) {
NewSpacePage* new_page =
- NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
- new_page->InsertAfter(page);
- page = new_page;
+ NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
+ new_page->InsertAfter(current);
+ current = new_page;
}
committed_ = true;
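
SemiSpace::Commit previously committed and linked pages backwards from the end of the reserved region; the hunk above switches to growing forward from start_, so page i sits at start_ + i * Page::kPageSize and the loop runs from 0 to pages - 1. A small sketch of the forward-linking loop with simplified stand-in types:

```cpp
// Sketch of the forward page-linking loop; Page and kPageSize here are
// simplified stand-ins, not V8's.
#include <cstdint>
#include <vector>

constexpr uintptr_t kPageSize = 4096;  // assumption; V8 uses Page::kPageSize

struct Page {
  uintptr_t address = 0;
  Page* next = nullptr;
  void InsertAfter(Page* prev) { next = prev->next; prev->next = this; }
};

// Links `pages` fresh pages behind `anchor` in ascending-address order,
// page i at start + i * kPageSize, mirroring the rewritten loop.
void LinkPagesForward(Page* anchor, uintptr_t start, int pages,
                      std::vector<Page>& storage) {
  storage.resize(pages);  // sized once, so slot addresses stay stable
  Page* current = anchor;
  for (int i = 0; i < pages; i++) {
    storage[i].address = start + i * kPageSize;
    storage[i].InsertAfter(current);
    current = &storage[i];
  }
}
```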
@@ -1577,20 +1584,18 @@ bool SemiSpace::GrowTo(int new_capacity) {
int pages_before = capacity_ / Page::kPageSize;
int pages_after = new_capacity / Page::kPageSize;
- Address end = start_ + maximum_capacity_;
- Address start = end - new_capacity;
size_t delta = new_capacity - capacity_;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start, delta, executable())) {
+ start_ + capacity_, delta, executable())) {
return false;
}
capacity_ = new_capacity;
NewSpacePage* last_page = anchor()->prev_page();
ASSERT(last_page != anchor());
- for (int i = pages_before + 1; i <= pages_after; i++) {
- Address page_address = end - i * Page::kPageSize;
+ for (int i = pages_before; i < pages_after; i++) {
+ Address page_address = start_ + i * Page::kPageSize;
NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
page_address,
this);
@@ -1610,25 +1615,20 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
ASSERT(new_capacity >= initial_capacity_);
ASSERT(new_capacity < capacity_);
if (is_committed()) {
- // Semispaces grow backwards from the end of their allocated capacity,
- // so we find the before and after start addresses relative to the
- // end of the space.
- Address space_end = start_ + maximum_capacity_;
- Address old_start = space_end - capacity_;
size_t delta = capacity_ - new_capacity;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
- if (!allocator->UncommitBlock(old_start, delta)) {
+ if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
return false;
}
int pages_after = new_capacity / Page::kPageSize;
NewSpacePage* new_last_page =
- NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+ NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
new_last_page->set_next_page(anchor());
anchor()->set_prev_page(new_last_page);
- ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+ ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
}
capacity_ = new_capacity;
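
With pages laid out forward from start_, GrowTo commits the tail beginning at start_ + capacity_ and ShrinkTo uncommits the tail beginning at start_ + new_capacity; the last page is now the one with the highest address, hence the flipped direction of the ASSERT on current_page_. The shrink arithmetic, as a trivial sketch with stand-in types:

```cpp
// Trivial sketch: once pages grow forward from start_, the region to
// uncommit is the tail [start + new_capacity, start + capacity).
#include <cstdint>

struct UncommitRange {
  uintptr_t begin;
  uintptr_t size;
};

UncommitRange ShrinkRange(uintptr_t start, uintptr_t capacity,
                          uintptr_t new_capacity) {
  return {start + new_capacity, capacity - new_capacity};
}
```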
@@ -1975,7 +1975,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
size_t NewSpace::CommittedPhysicalMemory() {
if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.is_committed()) {
size += from_space_.CommittedPhysicalMemory();
@@ -2501,9 +2501,9 @@ bool NewSpace::ReserveSpace(int bytes) {
Object* object = NULL;
if (!maybe->ToObject(&object)) return false;
HeapObject* allocation = HeapObject::cast(object);
- Address top = allocation_info_.top;
+ Address top = allocation_info_.top();
if ((top - bytes) == allocation->address()) {
- allocation_info_.top = allocation->address();
+ allocation_info_.set_top(allocation->address());
return true;
}
// There may be a borderline case here where the allocation succeeded, but
@@ -2549,9 +2549,9 @@ void PagedSpace::PrepareForMarkCompact() {
bool PagedSpace::ReserveSpace(int size_in_bytes) {
ASSERT(size_in_bytes <= AreaSize());
ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
- Address current_top = allocation_info_.top;
+ Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
- if (new_top <= allocation_info_.limit) return true;
+ if (new_top <= allocation_info_.limit()) return true;
HeapObject* new_area = free_list_.Allocate(size_in_bytes);
if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
@@ -2626,16 +2626,17 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
- if (allocation_info_.top >= allocation_info_.limit) return;
+ if (allocation_info_.top() >= allocation_info_.limit()) return;
- if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
+ if (Page::FromAllocationTop(allocation_info_.top())->
+ IsEvacuationCandidate()) {
// Create filler object to keep page iterable if it was iterable.
int remaining =
- static_cast<int>(allocation_info_.limit - allocation_info_.top);
- heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+ heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
}
}
@@ -2685,6 +2686,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Try to expand the space and allocate in the new next page.
if (Expand()) {
+ ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
return free_list_.Allocate(size_in_bytes);
}
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 43f44a5c70..2cd92c59d8 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -1317,18 +1317,53 @@ class PageIterator BASE_EMBEDDED {
// space.
class AllocationInfo {
public:
- AllocationInfo() : top(NULL), limit(NULL) {
+ AllocationInfo() : top_(NULL), limit_(NULL) {
}
- Address top; // Current allocation top.
- Address limit; // Current allocation limit.
+ INLINE(void set_top(Address top)) {
+ SLOW_ASSERT(top == NULL ||
+ (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+ top_ = top;
+ }
+
+ INLINE(Address top()) const {
+ SLOW_ASSERT(top_ == NULL ||
+ (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+ return top_;
+ }
+
+ Address* top_address() {
+ return &top_;
+ }
+
+ INLINE(void set_limit(Address limit)) {
+ SLOW_ASSERT(limit == NULL ||
+ (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
+ limit_ = limit;
+ }
+
+ INLINE(Address limit()) const {
+ SLOW_ASSERT(limit_ == NULL ||
+ (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
+ return limit_;
+ }
+
+ Address* limit_address() {
+ return &limit_;
+ }
#ifdef DEBUG
bool VerifyPagedAllocation() {
- return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
- && (top <= limit);
+ return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
+ && (top_ <= limit_);
}
#endif
+
+ private:
+ // Current allocation top.
+ Address top_;
+ // Current allocation limit.
+ Address limit_;
};
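
AllocationInfo's top and limit move behind accessors whose SLOW_ASSERTs check that the stored address has no heap-object tag bits set, catching code that accidentally writes a tagged pointer into the bump-pointer state, while top_address()/limit_address() keep the raw slots reachable for generated code. A sketch of the accessor-with-tag-check pattern using plain asserts (the 2-bit tag mask is an assumption standing in for HeapObjectTagMask()):

```cpp
// Sketch only: plain assert replaces V8's INLINE/SLOW_ASSERT macros, and
// the tag mask value is an assumption standing in for HeapObjectTagMask().
#include <cassert>
#include <cstdint>

constexpr uintptr_t kHeapObjectTagMask = 3;

class AllocationInfo {
 public:
  void set_top(uintptr_t top) {
    // The bump pointer must be an untagged address; storing a tagged
    // HeapObject pointer here would corrupt later allocations.
    assert(top == 0 || (top & kHeapObjectTagMask) == 0);
    top_ = top;
  }

  uintptr_t top() const {
    assert(top_ == 0 || (top_ & kHeapObjectTagMask) == 0);
    return top_;
  }

  // Raw slot, still needed so generated code can load/store top directly.
  uintptr_t* top_address() { return &top_; }

 private:
  uintptr_t top_ = 0;  // limit_ gets the identical treatment
};
```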
@@ -1707,16 +1742,29 @@ class PagedSpace : public Space {
virtual intptr_t Waste() { return accounting_stats_.Waste(); }
// Returns the allocation pointer in this space.
- Address top() { return allocation_info_.top; }
- Address limit() { return allocation_info_.limit; }
+ Address top() { return allocation_info_.top(); }
+ Address limit() { return allocation_info_.limit(); }
+
+ // The allocation top address.
+ Address* allocation_top_address() {
+ return allocation_info_.top_address();
+ }
- // The allocation top and limit addresses.
- Address* allocation_top_address() { return &allocation_info_.top; }
- Address* allocation_limit_address() { return &allocation_info_.limit; }
+ // The allocation limit address.
+ Address* allocation_limit_address() {
+ return allocation_info_.limit_address();
+ }
+
+ enum AllocationType {
+ NEW_OBJECT,
+ MOVE_OBJECT
+ };
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
- MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT inline MaybeObject* AllocateRaw(
+ int size_in_bytes,
+ AllocationType event = NEW_OBJECT);
virtual bool ReserveSpace(int bytes);
@@ -1738,9 +1786,9 @@ class PagedSpace : public Space {
void SetTop(Address top, Address limit) {
ASSERT(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- allocation_info_.top = top;
- allocation_info_.limit = limit;
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(top);
+ allocation_info_.set_limit(limit);
}
void Allocate(int bytes) {
@@ -2381,9 +2429,15 @@ class NewSpace : public Space {
// Return the address of the allocation pointer in the active semispace.
Address top() {
- ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
- return allocation_info_.top;
+ ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
+ return allocation_info_.top();
}
+
+ void set_top(Address top) {
+ ASSERT(to_space_.current_page()->ContainsLimit(top));
+ allocation_info_.set_top(top);
+ }
+
// Return the address of the first object in the active semispace.
Address bottom() { return to_space_.space_start(); }
@@ -2408,9 +2462,15 @@ class NewSpace : public Space {
return reinterpret_cast<Address>(index << kPointerSizeLog2);
}
- // The allocation top and limit addresses.
- Address* allocation_top_address() { return &allocation_info_.top; }
- Address* allocation_limit_address() { return &allocation_info_.limit; }
+ // The allocation top address.
+ Address* allocation_top_address() {
+ return allocation_info_.top_address();
+ }
+
+ // The allocation limit address.
+ Address* allocation_limit_address() {
+ return allocation_info_.limit_address();
+ }
MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
@@ -2420,13 +2480,14 @@ class NewSpace : public Space {
void LowerInlineAllocationLimit(intptr_t step) {
inline_allocation_limit_step_ = step;
if (step == 0) {
- allocation_info_.limit = to_space_.page_high();
+ allocation_info_.set_limit(to_space_.page_high());
} else {
- allocation_info_.limit = Min(
- allocation_info_.top + inline_allocation_limit_step_,
- allocation_info_.limit);
+ Address new_limit = Min(
+ allocation_info_.top() + inline_allocation_limit_step_,
+ allocation_info_.limit());
+ allocation_info_.set_limit(new_limit);
}
- top_on_previous_step_ = allocation_info_.top;
+ top_on_previous_step_ = allocation_info_.top();
}
// Get the extent of the inactive semispace (for use as a marking stack,
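
LowerInlineAllocationLimit, rewritten above to go through set_limit(), implements the incremental-marking pacing trick: clamping the visible limit to top + step makes the inline fast path fail every `step` bytes, diverting allocation to SlowAllocateRaw where a marking step proportional to the bytes allocated can run. A stand-in sketch of the clamping:

```cpp
// Stand-in sketch of the lowered-limit pacing; names are simplified,
// not V8's.
#include <algorithm>
#include <cstdint>

struct InlineLimit {
  uintptr_t top = 0;
  uintptr_t limit = 0;      // what the inline fast path compares against
  uintptr_t page_high = 0;  // the real end of the current page

  void Lower(uintptr_t step) {
    // step == 0 disables pacing and restores the true page boundary;
    // otherwise the visible limit is clamped so allocation takes the
    // slow path (and incremental marking can step) every `step` bytes.
    limit = (step == 0) ? page_high : std::min(top + step, limit);
  }
};
```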
@@ -2573,9 +2634,9 @@ class OldSpace : public PagedSpace {
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
- SLOW_ASSERT((space).page_low() <= (info).top \
- && (info).top <= (space).page_high() \
- && (info).limit <= (space).page_high())
+ SLOW_ASSERT((space).page_low() <= (info).top() \
+ && (info).top() <= (space).page_high() \
+ && (info).limit() <= (space).page_high())
// -----------------------------------------------------------------------------
diff --git a/deps/v8/src/store-buffer-inl.h b/deps/v8/src/store-buffer-inl.h
index e1fcdee661..7e5432c841 100644
--- a/deps/v8/src/store-buffer-inl.h
+++ b/deps/v8/src/store-buffer-inl.h
@@ -41,6 +41,7 @@ Address StoreBuffer::TopAddress() {
void StoreBuffer::Mark(Address addr) {
ASSERT(!heap_->cell_space()->Contains(addr));
ASSERT(!heap_->code_space()->Contains(addr));
+ ASSERT(!heap_->old_data_space()->Contains(addr));
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
*top++ = addr;
heap_->public_set_store_buffer_top(top);
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index cb82c16634..14b44ca41f 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -28,7 +28,6 @@
// This file relies on the fact that the following declaration has been made
// in runtime.js:
// var $String = global.String;
-// var $NaN = 0/0;
// -------------------------------------------------------------------
@@ -574,7 +573,7 @@ function StringSlice(start, end) {
var s_len = s.length;
var start_i = TO_INTEGER(start);
var end_i = s_len;
- if (end !== void 0) {
+ if (!IS_UNDEFINED(end)) {
end_i = TO_INTEGER(end);
}
@@ -699,7 +698,7 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
%_CallFunction(result, %_SubString(subject, start, end),
ArrayPushBuiltin);
} else {
- %_CallFunction(result, void 0, ArrayPushBuiltin);
+ %_CallFunction(result, UNDEFINED, ArrayPushBuiltin);
}
if (result.length === limit) break outer_loop;
}
@@ -756,7 +755,7 @@ function StringSubstr(start, n) {
// Correct n: If not given, set to string length; if explicitly
// set to undefined, zero, or negative, returns empty string.
- if (n === void 0) {
+ if (IS_UNDEFINED(n)) {
len = s.length;
} else {
len = TO_INTEGER(n);
@@ -765,7 +764,7 @@ function StringSubstr(start, n) {
// Correct start: If not given (or undefined), set to zero; otherwise
// convert to integer and handle negative case.
- if (start === void 0) {
+ if (IS_UNDEFINED(start)) {
start = 0;
} else {
start = TO_INTEGER(start);
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 7b23d0c96a..67002a36b1 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -99,21 +99,11 @@ Code* StubCache::Set(Name* name, Map* map, Code* code) {
}
-Handle<JSObject> StubCache::StubHolder(Handle<JSObject> receiver,
- Handle<JSObject> holder) {
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*receiver, *holder);
- return Handle<JSObject>(IC::GetCodeCacheHolder(
- isolate_, *receiver, cache_holder));
-}
-
-
Handle<Code> StubCache::FindIC(Handle<Name> name,
Handle<Map> stub_holder_map,
Code::Kind kind,
- Code::StubType type,
Code::ExtraICState extra_state) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind, extra_state, type);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(kind, extra_state);
Handle<Object> probe(stub_holder_map->FindInCodeCache(*name, flags),
isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -124,41 +114,22 @@ Handle<Code> StubCache::FindIC(Handle<Name> name,
Handle<Code> StubCache::FindIC(Handle<Name> name,
Handle<JSObject> stub_holder,
Code::Kind kind,
- Code::StubType type,
Code::ExtraICState extra_ic_state) {
- return FindIC(name, Handle<Map>(stub_holder->map()), kind,
- type, extra_ic_state);
+ return FindIC(name, Handle<Map>(stub_holder->map()), kind, extra_ic_state);
}
-Handle<Code> StubCache::FindLoadHandler(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::StubType type) {
- Code::ExtraICState extra_ic_state = Code::ComputeExtraICState(
- receiver.is_identical_to(stub_holder) ? Code::OWN_STUB
- : Code::PROTOTYPE_STUB);
- ASSERT(type != Code::NORMAL);
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STUB, extra_ic_state, type, kind);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
- return Handle<Code>::null();
-}
-
-
-Handle<Code> StubCache::FindStoreHandler(Handle<Name> name,
- Handle<JSObject> receiver,
- Code::Kind kind,
- Code::StubType type,
- StrictModeFlag strict_mode) {
- Code::ExtraICState extra_ic_state = Code::ComputeExtraICState(
- STANDARD_STORE, strict_mode);
- ASSERT(type != Code::NORMAL);
+Handle<Code> StubCache::FindHandler(Handle<Name> name,
+ Handle<JSObject> receiver,
+ Code::Kind kind,
+ StrictModeFlag strict_mode) {
+ Code::ExtraICState extra_ic_state = Code::kNoExtraICState;
+ if (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC) {
+ extra_ic_state = Code::ComputeExtraICState(
+ STANDARD_STORE, strict_mode);
+ }
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STUB, extra_ic_state, type, kind);
+ Code::HANDLER, extra_ic_state, Code::NORMAL, kind);
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -166,66 +137,29 @@ Handle<Code> StubCache::FindStoreHandler(Handle<Name> name,
}
-Handle<Code> StubCache::ComputeMonomorphicLoadIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name) {
+Handle<Code> StubCache::ComputeMonomorphicIC(Handle<HeapObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name,
+ StrictModeFlag strict_mode) {
+ Code::Kind kind = handler->handler_kind();
Handle<Map> map(receiver->map());
- Handle<Code> ic = FindIC(name, map, Code::LOAD_IC, handler->type());
+ Handle<Code> ic = FindIC(name, map, kind, strict_mode);
if (!ic.is_null()) return ic;
- LoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
-
- HeapObject::UpdateMapCodeCache(receiver, name, ic);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputeMonomorphicKeyedLoadIC(
- Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name) {
- Handle<Map> map(receiver->map());
- Handle<Code> ic = FindIC(name, map, Code::KEYED_LOAD_IC, handler->type());
- if (!ic.is_null()) return ic;
-
- KeyedLoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
-
- HeapObject::UpdateMapCodeCache(receiver, name, ic);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputeMonomorphicStoreIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
- Handle<Map> map(receiver->map());
- Handle<Code> ic = FindIC(
- name, map, Code::STORE_IC, handler->type(), strict_mode);
- if (!ic.is_null()) return ic;
-
- StoreStubCompiler ic_compiler(isolate(), strict_mode);
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
-
- HeapObject::UpdateMapCodeCache(receiver, name, ic);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputeMonomorphicKeyedStoreIC(
- Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
- Handle<Map> map(receiver->map());
- Handle<Code> ic = FindIC(
- name, map, Code::KEYED_STORE_IC, handler->type(), strict_mode);
- if (!ic.is_null()) return ic;
-
- KeyedStoreStubCompiler ic_compiler(isolate(), strict_mode, STANDARD_STORE);
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ if (kind == Code::LOAD_IC) {
+ LoadStubCompiler ic_compiler(isolate());
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ } else if (kind == Code::KEYED_LOAD_IC) {
+ KeyedLoadStubCompiler ic_compiler(isolate());
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ } else if (kind == Code::STORE_IC) {
+ StoreStubCompiler ic_compiler(isolate(), strict_mode);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ } else {
+ ASSERT(kind == Code::KEYED_STORE_IC);
+ KeyedStoreStubCompiler ic_compiler(isolate(), strict_mode, STANDARD_STORE);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ }
HeapObject::UpdateMapCodeCache(receiver, name, ic);
return ic;
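
Four near-identical ComputeMonomorphic*IC entry points collapse above into a single ComputeMonomorphicIC that reads the kind off the handler and picks the matching stub compiler, so the cache probe and UpdateMapCodeCache each happen in exactly one place. A toy sketch of that consolidation, with std::map and int standing in for V8's map code cache and receiver map:

```cpp
// Toy sketch of the kind-dispatch consolidation; none of this is V8's
// real API.
#include <map>
#include <utility>

enum Kind { LOAD_IC, KEYED_LOAD_IC, STORE_IC, KEYED_STORE_IC };
struct Code { Kind kind; int map_id; };

static std::map<std::pair<Kind, int>, Code> cache;

Code ComputeMonomorphicIC(Kind kind, int map_id) {
  auto it = cache.find({kind, map_id});
  if (it != cache.end()) return it->second;  // FindIC hit: reuse the IC
  // One dispatch on `kind` replaces four copies of the compile sequence.
  Code ic{kind, map_id};  // stand-in for ic_compiler.CompileMonomorphicIC(...)
  cache.emplace(std::make_pair(kind, map_id), ic);  // UpdateMapCodeCache analogue
  return ic;
}
```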
@@ -257,8 +191,7 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
- Handle<Code> handler = FindLoadHandler(
- cache_name, receiver, receiver, Code::LOAD_IC, Code::NONEXISTENT);
+ Handle<Code> handler = FindHandler(cache_name, receiver, Code::LOAD_IC);
if (!handler.is_null()) return handler;
LoadStubCompiler compiler(isolate_);
@@ -269,268 +202,22 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
}
-Handle<Code> StubCache::ComputeLoadField(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
- if (receiver.is_identical_to(holder)) {
- LoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- return stub.GetCode(isolate());
- }
-
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::FIELD);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadField(receiver, holder, name, field, representation);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadCallback(receiver, holder, name, callback);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- const CallOptimization& call_optimization) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadCallback(receiver, holder, name, call_optimization);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadViaGetter(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadViaGetter(receiver, holder, name, getter);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadConstant(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<Object> value) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> handler = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::CONSTANT);
- if (!handler.is_null()) return handler;
-
- LoadStubCompiler compiler(isolate_);
- handler = compiler.CompileLoadConstant(receiver, holder, name, value);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
-
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadInterceptor(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::INTERCEPTOR);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadInterceptor(receiver, holder, name);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadNormal(Handle<Name> name,
- Handle<JSObject> receiver) {
- return isolate_->builtins()->LoadIC_Normal();
-}
-
-
Handle<Code> StubCache::ComputeLoadGlobal(Handle<Name> name,
Handle<JSObject> receiver,
Handle<GlobalObject> holder,
Handle<PropertyCell> cell,
bool is_dont_delete) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindIC(name, stub_holder, Code::LOAD_IC, Code::NORMAL);
+ Handle<Code> stub = FindIC(name, receiver, Code::LOAD_IC);
if (!stub.is_null()) return stub;
LoadStubCompiler compiler(isolate_);
Handle<Code> ic =
compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
- HeapObject::UpdateMapCodeCache(stub_holder, name, ic);
+ HeapObject::UpdateMapCodeCache(receiver, name, ic);
return ic;
}
-Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
- if (receiver.is_identical_to(holder)) {
- // TODO(titzer): this should use an HObjectAccess
- KeyedLoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- return stub.GetCode(isolate());
- }
-
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::FIELD);
- if (!stub.is_null()) return stub;
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadField(receiver, holder, name, field, representation);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<Object> value) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> handler = FindLoadHandler(
- name, receiver, stub_holder, Code::KEYED_LOAD_IC,
- Code::CONSTANT);
- if (!handler.is_null()) return handler;
-
- KeyedLoadStubCompiler compiler(isolate_);
- handler = compiler.CompileLoadConstant(receiver, holder, name, value);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
- if (!stub.is_null()) return stub;
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadInterceptor(receiver, holder, name);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadCallback(receiver, holder, name, callback);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- const CallOptimization& call_optimization) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadCallback(receiver, holder, name, call_optimization);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeStoreField(Handle<Name> name,
- Handle<JSObject> receiver,
- LookupResult* lookup,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::FIELD, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeStoreTransition(Handle<Name> name,
- Handle<JSObject> receiver,
- LookupResult* lookup,
- Handle<Map> transition,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::MAP_TRANSITION, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler =
- compiler.CompileStoreTransition(receiver, lookup, transition, name);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
Handle<Name> name =
@@ -575,26 +262,18 @@ Handle<Code> StubCache::ComputeKeyedStoreElement(
}
-Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
- return (strict_mode == kStrictMode)
- ? isolate_->builtins()->Builtins::StoreIC_Normal_Strict()
- : isolate_->builtins()->Builtins::StoreIC_Normal();
-}
-
-
Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name,
Handle<GlobalObject> receiver,
Handle<PropertyCell> cell,
Handle<Object> value,
StrictModeFlag strict_mode) {
- Isolate* isolate = cell->GetIsolate();
- Handle<Type> union_type(PropertyCell::UpdateType(cell, value), isolate);
+ Handle<Type> union_type = PropertyCell::UpdatedType(cell, value);
bool is_constant = union_type->IsConstant();
StoreGlobalStub stub(strict_mode, is_constant);
Handle<Code> code = FindIC(
name, Handle<JSObject>::cast(receiver),
- Code::STORE_IC, Code::NORMAL, stub.GetExtraICState());
+ Code::STORE_IC, stub.GetExtraICState());
if (!code.is_null()) return code;
// Replace the placeholder cell and global object map with the actual global
@@ -612,107 +291,6 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name,
}
-Handle<Code> StubCache::ComputeStoreCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback,
- StrictModeFlag strict_mode) {
- ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler = compiler.CompileStoreCallback(
- receiver, holder, name, callback);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeStoreCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- const CallOptimization& call_optimization,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler = compiler.CompileStoreCallback(
- receiver, holder, name, call_optimization);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeStoreViaSetter(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler = compiler.CompileStoreViaSetter(
- receiver, holder, name, setter);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeStoreInterceptor(Handle<Name> name,
- Handle<JSObject> receiver,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::INTERCEPTOR, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler = compiler.CompileStoreInterceptor(receiver, name);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedStoreField(Handle<Name> name,
- Handle<JSObject> receiver,
- LookupResult* lookup,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::KEYED_STORE_IC, Code::FIELD, strict_mode);
- if (!stub.is_null()) return stub;
-
- KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
- Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedStoreTransition(
- Handle<Name> name,
- Handle<JSObject> receiver,
- LookupResult* lookup,
- Handle<Map> transition,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::KEYED_STORE_IC, Code::MAP_TRANSITION, strict_mode);
- if (!stub.is_null()) return stub;
-
- KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
- Handle<Code> handler =
- compiler.CompileStoreTransition(receiver, lookup, transition, name);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
#define CALL_LOGGER_TAG(kind, type) \
(kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
@@ -858,17 +436,13 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
Handle<GlobalObject> holder,
Handle<PropertyCell> cell,
Handle<JSFunction> function) {
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*receiver, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *receiver, cache_holder));
Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::NORMAL, argc, cache_holder);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
+ kind, extra_state, Code::NORMAL, argc);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
- CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
+ CallStubCompiler compiler(isolate(), argc, kind, extra_state);
Handle<Code> code =
compiler.CompileCallGlobal(receiver, holder, cell, function, name);
ASSERT(flags == code->flags());
@@ -876,7 +450,7 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
if (CallStubCompiler::CanBeCached(function)) {
- HeapObject::UpdateMapCodeCache(stub_holder, name, code);
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
}
return code;
}
@@ -1036,7 +610,7 @@ Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
Handle<String> name(isolate_->heap()->empty_string());
if (!receiver_map->is_shared()) {
Handle<Code> cached_ic = FindIC(name, receiver_map, Code::COMPARE_NIL_IC,
- Code::NORMAL, stub.GetExtraICState());
+ stub.GetExtraICState());
if (!cached_ic.is_null()) return cached_ic;
}
@@ -1073,30 +647,25 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
}
-Handle<Code> StubCache::ComputePolymorphicLoadIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name) {
- LoadStubCompiler ic_compiler(isolate_);
- Code::StubType type = number_of_valid_maps == 1 ? handlers->at(0)->type()
- : Code::NORMAL;
- Handle<Code> ic = ic_compiler.CompilePolymorphicIC(
- receiver_maps, handlers, name, type, PROPERTY);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputePolymorphicStoreIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
- StoreStubCompiler ic_compiler(isolate_, strict_mode);
- Code::StubType type = number_of_valid_maps == 1 ? handlers->at(0)->type()
+Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ StrictModeFlag strict_mode) {
+ Handle<Code> handler = handlers->at(0);
+ Code::Kind kind = handler->handler_kind();
+ Code::StubType type = number_of_valid_maps == 1 ? handler->type()
: Code::NORMAL;
- Handle<Code> ic = ic_compiler.CompilePolymorphicIC(
- receiver_maps, handlers, name, type, PROPERTY);
- return ic;
+ if (kind == Code::LOAD_IC) {
+ LoadStubCompiler ic_compiler(isolate_);
+ return ic_compiler.CompilePolymorphicIC(
+ receiver_maps, handlers, name, type, PROPERTY);
+ } else {
+ ASSERT(kind == Code::STORE_IC);
+ StoreStubCompiler ic_compiler(isolate_, strict_mode);
+ return ic_compiler.CompilePolymorphicIC(
+ receiver_maps, handlers, name, type, PROPERTY);
+ }
}
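
The polymorphic variant is merged the same way, keeping one detail from both originals: a single remaining valid map may retain the handler's precise stub type, while genuine polymorphism falls back to Code::NORMAL. As a one-line sketch with illustrative enums (not V8's Code::StubType):

```cpp
// Illustrative stand-in enum, not V8's Code::StubType.
enum StubType { FIELD, CONSTANT, CALLBACKS, NORMAL };

StubType PolymorphicStubType(int number_of_valid_maps, StubType handler_type) {
  return number_of_valid_maps == 1 ? handler_type : NORMAL;
}
```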
@@ -1300,12 +869,12 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
// If the load is non-contextual, just return the undefined result.
// Note that both keyed and non-keyed loads may end up here, so we
// can't use either LoadIC or KeyedLoadIC constructors.
+ HandleScope scope(isolate);
IC ic(IC::NO_EXTRA_FRAME, isolate);
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
if (!ic.SlowIsUndeclaredGlobal()) return isolate->heap()->undefined_value();
// Throw a reference error.
- HandleScope scope(isolate);
Handle<Name> name_handle(name);
Handle<Object> error =
isolate->factory()->NewReferenceError("not_defined",
@@ -1314,8 +883,8 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
}
-static MaybeObject* LoadWithInterceptor(Arguments* args,
- PropertyAttributes* attrs) {
+static Handle<Object> LoadWithInterceptor(Arguments* args,
+ PropertyAttributes* attrs) {
ASSERT(args->length() == StubCache::kInterceptorArgsLength);
Handle<Name> name_handle =
args->at<Name>(StubCache::kInterceptorArgsNameIndex);
@@ -1329,9 +898,10 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
Isolate* isolate = receiver_handle->GetIsolate();
// TODO(rossberg): Support symbols in the API.
- if (name_handle->IsSymbol())
- return holder_handle->GetPropertyPostInterceptor(
- *receiver_handle, *name_handle, attrs);
+ if (name_handle->IsSymbol()) {
+ return JSObject::GetPropertyPostInterceptor(
+ holder_handle, receiver_handle, name_handle, attrs);
+ }
Handle<String> name = Handle<String>::cast(name_handle);
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
@@ -1344,24 +914,21 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
*receiver_handle,
*holder_handle);
{
- // Use the interceptor getter.
HandleScope scope(isolate);
+ // Use the interceptor getter.
v8::Handle<v8::Value> r =
callback_args.Call(getter, v8::Utils::ToLocal(name));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!r.IsEmpty()) {
*attrs = NONE;
Handle<Object> result = v8::Utils::OpenHandle(*r);
result->VerifyApiCallResultType();
- return *result;
+ return scope.CloseAndEscape(result);
}
}
- MaybeObject* result = holder_handle->GetPropertyPostInterceptor(
- *receiver_handle,
- *name_handle,
- attrs);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ Handle<Object> result = JSObject::GetPropertyPostInterceptor(
+ holder_handle, receiver_handle, name_handle, attrs);
return result;
}
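
LoadWithInterceptor is handlified above: it now returns a Handle<Object> instead of a raw MaybeObject*, and the value produced inside the inner HandleScope is kept alive via scope.CloseAndEscape(result). A much-simplified model of that scope/escape mechanic with stand-in types (V8's real handles are arena slots scoped to HandleScopes in roughly this way):

```cpp
// Much-simplified model of HandleScope/CloseAndEscape; stand-in types only.
#include <cstddef>
#include <deque>

struct Object { int value; };

// A deque keeps existing handle slots valid as new ones are appended
// (std::vector reallocation would invalidate them).
class HandleScope {
 public:
  explicit HandleScope(std::deque<Object*>* arena)
      : arena_(arena), mark_(arena->size()) {}
  ~HandleScope() { arena_->resize(mark_); }  // drop handles made inside

  Object** Create(Object* o) {
    arena_->push_back(o);
    return &arena_->back();
  }

  // Mirror of scope.CloseAndEscape(result): save the referent, tear the
  // scope down, then re-register the value as belonging to the caller.
  Object** CloseAndEscape(Object** handle) {
    Object* saved = *handle;
    arena_->resize(mark_);
    arena_->push_back(saved);
    ++mark_;  // the escaped slot now survives this scope's destructor
    return &arena_->back();
  }

 private:
  std::deque<Object*>* arena_;
  std::size_t mark_;
};
```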
@@ -1372,40 +939,42 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
*/
RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) {
PropertyAttributes attr = NONE;
- Object* result;
- { MaybeObject* maybe_result = LoadWithInterceptor(&args, &attr);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ HandleScope scope(isolate);
+ Handle<Object> result = LoadWithInterceptor(&args, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
// If the property is present, return it.
- if (attr != ABSENT) return result;
+ if (attr != ABSENT) return *result;
return ThrowReferenceError(isolate, Name::cast(args[0]));
}
RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) {
PropertyAttributes attr;
- MaybeObject* result = LoadWithInterceptor(&args, &attr);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ HandleScope scope(isolate);
+ Handle<Object> result = LoadWithInterceptor(&args, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
// This is a call IC. In this case, we simply return the undefined result which
// will lead to an exception when trying to invoke the result as a
// function.
- return result;
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
- JSObject* recv = JSObject::cast(args[0]);
- Name* name = Name::cast(args[1]);
- Object* value = args[2];
+ Handle<JSObject> recv(JSObject::cast(args[0]));
+ Handle<Name> name(Name::cast(args[1]));
+ Handle<Object> value(args[2], isolate);
ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
ASSERT(recv->HasNamedInterceptor());
PropertyAttributes attr = NONE;
- MaybeObject* result = recv->SetPropertyWithInterceptor(
- name, value, attr, strict_mode);
- return result;
+ Handle<Object> result = JSObject::SetPropertyWithInterceptor(
+ recv, name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -1599,7 +1168,7 @@ void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
#define __ ACCESS_MASM(masm())
-Register BaseLoadStubCompiler::HandlerFrontendHeader(
+Register LoadStubCompiler::HandlerFrontendHeader(
Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -1613,7 +1182,7 @@ Register BaseLoadStubCompiler::HandlerFrontendHeader(
// HandlerFrontend for store uses the name register. It has to be restored
// before a miss.
-Register BaseStoreStubCompiler::HandlerFrontendHeader(
+Register StoreStubCompiler::HandlerFrontendHeader(
Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -1639,7 +1208,7 @@ Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<JSObject> object,
}
-Handle<Code> BaseLoadStubCompiler::CompileLoadField(
+Handle<Code> LoadStubCompiler::CompileLoadField(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
@@ -1659,7 +1228,7 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadField(
}
-Handle<Code> BaseLoadStubCompiler::CompileLoadConstant(
+Handle<Code> LoadStubCompiler::CompileLoadConstant(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
@@ -1674,7 +1243,7 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadConstant(
}
-Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
@@ -1691,7 +1260,7 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
}
-Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
@@ -1710,7 +1279,7 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
}
-Handle<Code> BaseLoadStubCompiler::CompileLoadInterceptor(
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name) {
@@ -1730,7 +1299,7 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadInterceptor(
}
-void BaseLoadStubCompiler::GenerateLoadPostInterceptor(
+void LoadStubCompiler::GenerateLoadPostInterceptor(
Register interceptor_reg,
Handle<JSObject> interceptor_holder,
Handle<Name> name,
@@ -1789,14 +1358,14 @@ Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
HandlerFrontend(object, receiver(), holder, name, &success);
__ bind(&success);
- GenerateLoadViaGetter(masm(), getter);
+ GenerateLoadViaGetter(masm(), receiver(), getter);
// Return the generated code.
return GetCode(kind(), Code::CALLBACKS, name);
}
-Handle<Code> BaseStoreStubCompiler::CompileStoreTransition(
+Handle<Code> StoreStubCompiler::CompileStoreTransition(
Handle<JSObject> object,
LookupResult* lookup,
Handle<Map> transition,
@@ -1849,13 +1418,13 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreTransition(
TailCallBuiltin(masm(), SlowBuiltin(kind()));
// Return the generated code.
- return GetCode(kind(), Code::MAP_TRANSITION, name);
+ return GetCode(kind(), Code::TRANSITION, name);
}
-Handle<Code> BaseStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Name> name) {
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name) {
Label miss;
HandlerFrontendHeader(object, receiver(), object, name, &miss);
@@ -1948,23 +1517,33 @@ void StubCompiler::TailCallBuiltin(MacroAssembler* masm, Builtins::Name name) {
}
-void LoadStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-}
-
-
-void KeyedLoadStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-}
-
-
-void StoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+void BaseLoadStoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ GDBJITInterface::CodeTag tag;
+ if (kind_ == Code::LOAD_IC) {
+ tag = GDBJITInterface::LOAD_IC;
+ } else if (kind_ == Code::KEYED_LOAD_IC) {
+ tag = GDBJITInterface::KEYED_LOAD_IC;
+ } else if (kind_ == Code::STORE_IC) {
+ tag = GDBJITInterface::STORE_IC;
+ } else {
+ tag = GDBJITInterface::KEYED_STORE_IC;
+ }
+ GDBJIT(AddCode(tag, *name, *code));
+#endif
}
-void KeyedStoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
- GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
+void BaseLoadStoreStubCompiler::InitializeRegisters() {
+ if (kind_ == Code::LOAD_IC) {
+ registers_ = LoadStubCompiler::registers();
+ } else if (kind_ == Code::KEYED_LOAD_IC) {
+ registers_ = KeyedLoadStubCompiler::registers();
+ } else if (kind_ == Code::STORE_IC) {
+ registers_ = StoreStubCompiler::registers();
+ } else {
+ registers_ = KeyedStoreStubCompiler::registers();
+ }
}
@@ -1972,21 +1551,7 @@ Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name,
InlineCacheState state) {
- Code::Flags flags = Code::ComputeFlags(
- kind, state, extra_state(), type);
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
- JitEvent(name, code);
- return code;
-}
-
-
-Handle<Code> BaseLoadStubCompiler::GetCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name) {
- ASSERT(type != Code::NORMAL);
- Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState, type, kind);
+ Code::Flags flags = Code::ComputeFlags(kind, state, extra_state(), type);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
JitEvent(name, code);
@@ -1994,12 +1559,12 @@ Handle<Code> BaseLoadStubCompiler::GetCode(Code::Kind kind,
}
-Handle<Code> BaseStoreStubCompiler::GetCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name) {
+Handle<Code> BaseLoadStoreStubCompiler::GetCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name) {
ASSERT(type != Code::NORMAL);
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, extra_state(), type, kind);
+ Code::HANDLER, MONOMORPHIC, extra_state(), type, kind);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
JitEvent(name, code);
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 63cb42b46e..38bc7a3c3a 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -83,83 +83,28 @@ class StubCache {
Handle<Code> FindIC(Handle<Name> name,
Handle<Map> stub_holder_map,
Code::Kind kind,
- Code::StubType type,
Code::ExtraICState extra_state = Code::kNoExtraICState);
Handle<Code> FindIC(Handle<Name> name,
Handle<JSObject> stub_holder,
Code::Kind kind,
- Code::StubType type,
Code::ExtraICState extra_state = Code::kNoExtraICState);
- Handle<Code> FindLoadHandler(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::StubType type);
-
- Handle<Code> FindStoreHandler(Handle<Name> name,
- Handle<JSObject> receiver,
- Code::Kind kind,
- Code::StubType type,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeMonomorphicLoadIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name);
-
- Handle<Code> ComputeMonomorphicKeyedLoadIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name);
+ Handle<Code> FindHandler(Handle<Name> name,
+ Handle<JSObject> receiver,
+ Code::Kind kind,
+ StrictModeFlag strict_mode = kNonStrictMode);
- Handle<Code> ComputeMonomorphicStoreIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeMonomorphicKeyedStoreIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name,
- StrictModeFlag strict_mode);
+ Handle<Code> ComputeMonomorphicIC(Handle<HeapObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name,
+ StrictModeFlag strict_mode);
// Computes the right stub matching. Inserts the result in the
// cache before returning. This might compile a stub if needed.
Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
Handle<JSObject> object);
- Handle<Code> ComputeLoadField(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex field_index,
- Representation representation);
-
- Handle<Code> ComputeLoadCallback(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback);
-
- Handle<Code> ComputeLoadCallback(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- const CallOptimization& call_optimization);
-
- Handle<Code> ComputeLoadViaGetter(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> getter);
-
- Handle<Code> ComputeLoadConstant(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Object> value);
-
- Handle<Code> ComputeLoadInterceptor(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder);
-
- Handle<Code> ComputeLoadNormal(Handle<Name> name,
- Handle<JSObject> object);
-
Handle<Code> ComputeLoadGlobal(Handle<Name> name,
Handle<JSObject> object,
Handle<GlobalObject> holder,
@@ -195,69 +140,18 @@ class StubCache {
Handle<JSObject> object,
Handle<JSObject> holder);
- // ---
-
- Handle<Code> ComputeStoreField(Handle<Name> name,
- Handle<JSObject> object,
- LookupResult* lookup,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreTransition(Handle<Name> name,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode);
-
Handle<Code> ComputeStoreGlobal(Handle<Name> name,
Handle<GlobalObject> object,
Handle<PropertyCell> cell,
Handle<Object> value,
StrictModeFlag strict_mode);
- Handle<Code> ComputeStoreCallback(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreCallback(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- const CallOptimization& call_optimation,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreViaSetter(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> setter,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreInterceptor(Handle<Name> name,
- Handle<JSObject> object,
- StrictModeFlag strict_mode);
-
- // ---
-
- Handle<Code> ComputeKeyedStoreField(Handle<Name> name,
- Handle<JSObject> object,
- LookupResult* lookup,
- StrictModeFlag strict_mode);
- Handle<Code> ComputeKeyedStoreTransition(Handle<Name> name,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- StrictModeFlag strict_mode);
-
Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map);
Handle<Code> ComputeKeyedStoreElement(Handle<Map> receiver_map,
StrictModeFlag strict_mode,
KeyedAccessStoreMode store_mode);
- // ---
-
Handle<Code> ComputeCallField(int argc,
Code::Kind,
Code::ExtraICState extra_state,
@@ -326,16 +220,11 @@ class StubCache {
KeyedAccessStoreMode store_mode,
StrictModeFlag strict_mode);
- Handle<Code> ComputePolymorphicLoadIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name);
-
- Handle<Code> ComputePolymorphicStoreIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode);
+ Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ StrictModeFlag strict_mode);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
@@ -572,8 +461,7 @@ class StubCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss_label,
- bool support_wrappers);
+ Label* miss_label);
static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
@@ -652,8 +540,10 @@ enum FrontendCheckType { PERFORM_INITIAL_CHECKS, SKIP_INITIAL_CHECKS };
class BaseLoadStoreStubCompiler: public StubCompiler {
public:
- BaseLoadStoreStubCompiler(Isolate* isolate, Register* registers)
- : StubCompiler(isolate), registers_(registers) { }
+ BaseLoadStoreStubCompiler(Isolate* isolate, Code::Kind kind)
+ : StubCompiler(isolate), kind_(kind) {
+ InitializeRegisters();
+ }
virtual ~BaseLoadStoreStubCompiler() { }
Handle<Code> CompileMonomorphicIC(Handle<Map> receiver_map,
@@ -698,30 +588,53 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
Handle<Name> name,
Label* success);
+ Handle<Code> GetCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name);
+
Handle<Code> GetICCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name,
InlineCacheState state = MONOMORPHIC);
+ Code::Kind kind() { return kind_; }
+
+ Logger::LogEventsAndTags log_kind(Handle<Code> code) {
+ if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
+ if (kind_ == Code::LOAD_IC) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG;
+ } else if (kind_ == Code::KEYED_LOAD_IC) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
+ } else if (kind_ == Code::STORE_IC) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::STORE_IC_TAG : Logger::STORE_POLYMORPHIC_IC_TAG;
+ } else {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::KEYED_STORE_IC_TAG : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
+ }
+ }
+ void JitEvent(Handle<Name> name, Handle<Code> code);
virtual Code::ExtraICState extra_state() { return Code::kNoExtraICState; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) = 0;
- virtual void JitEvent(Handle<Name> name, Handle<Code> code) = 0;
- virtual Code::Kind kind() = 0;
virtual Register receiver() = 0;
virtual Register name() = 0;
virtual Register scratch1() = 0;
virtual Register scratch2() = 0;
virtual Register scratch3() = 0;
+ void InitializeRegisters();
+
+ Code::Kind kind_;
Register* registers_;
};
-class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
+class LoadStubCompiler: public BaseLoadStoreStubCompiler {
public:
- BaseLoadStubCompiler(Isolate* isolate, Register* registers)
- : BaseLoadStoreStubCompiler(isolate, registers) { }
- virtual ~BaseLoadStubCompiler() { }
+ LoadStubCompiler(Isolate* isolate, Code::Kind kind = Code::LOAD_IC)
+ : BaseLoadStoreStubCompiler(isolate, kind) { }
+ virtual ~LoadStubCompiler() { }
Handle<Code> CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
@@ -748,6 +661,28 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<JSObject> holder,
Handle<Name> name);
+ Handle<Code> CompileLoadViaGetter(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<JSFunction> getter);
+
+ static void GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
+ Handle<JSFunction> getter);
+
+ Handle<Code> CompileLoadNonexistent(Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<Name> name,
+ Handle<GlobalObject> global);
+
+ Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
+ bool is_dont_delete);
+
+ static Register* registers();
+
protected:
virtual Register HandlerFrontendHeader(Handle<JSObject> object,
Register object_reg,
@@ -789,10 +724,6 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<Name> name,
LookupResult* lookup);
- Handle<Code> GetCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name);
-
virtual Register receiver() { return registers_[0]; }
virtual Register name() { return registers_[1]; }
virtual Register scratch1() { return registers_[2]; }
@@ -802,46 +733,10 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
};
-class LoadStubCompiler: public BaseLoadStubCompiler {
- public:
- explicit LoadStubCompiler(Isolate* isolate)
- : BaseLoadStubCompiler(isolate, registers()) { }
-
- Handle<Code> CompileLoadNonexistent(Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Handle<GlobalObject> global);
-
- static void GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter);
-
- Handle<Code> CompileLoadViaGetter(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Handle<JSFunction> getter);
-
- Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<Name> name,
- bool is_dont_delete);
-
- private:
- static Register* registers();
- virtual Code::Kind kind() { return Code::LOAD_IC; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
- return code->ic_state() == MONOMORPHIC
- ? Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG;
- }
- virtual void JitEvent(Handle<Name> name, Handle<Code> code);
-};
-
-
-class KeyedLoadStubCompiler: public BaseLoadStubCompiler {
+class KeyedLoadStubCompiler: public LoadStubCompiler {
public:
explicit KeyedLoadStubCompiler(Isolate* isolate)
- : BaseLoadStubCompiler(isolate, registers()) { }
+ : LoadStubCompiler(isolate, Code::KEYED_LOAD_IC) { }
Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
@@ -850,30 +745,26 @@ class KeyedLoadStubCompiler: public BaseLoadStubCompiler {
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
- private:
+ protected:
static Register* registers();
- virtual Code::Kind kind() { return Code::KEYED_LOAD_IC; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
- return code->ic_state() == MONOMORPHIC
- ? Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
- }
- virtual void JitEvent(Handle<Name> name, Handle<Code> code);
+
+ private:
virtual void GenerateNameCheck(Handle<Name> name,
Register name_reg,
Label* miss);
+ friend class BaseLoadStoreStubCompiler;
};
-class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler {
+class StoreStubCompiler: public BaseLoadStoreStubCompiler {
public:
- BaseStoreStubCompiler(Isolate* isolate,
- StrictModeFlag strict_mode,
- Register* registers)
- : BaseLoadStoreStubCompiler(isolate, registers),
+ StoreStubCompiler(Isolate* isolate,
+ StrictModeFlag strict_mode,
+ Code::Kind kind = Code::STORE_IC)
+ : BaseLoadStoreStubCompiler(isolate, kind),
strict_mode_(strict_mode) { }
- virtual ~BaseStoreStubCompiler() { }
+ virtual ~StoreStubCompiler() { }
Handle<Code> CompileStoreTransition(Handle<JSObject> object,
LookupResult* lookup,
@@ -914,16 +805,27 @@ class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler {
Register scratch2,
Label* miss_label);
- static Builtins::Name MissBuiltin(Code::Kind kind) {
- switch (kind) {
- case Code::LOAD_IC: return Builtins::kLoadIC_Miss;
- case Code::STORE_IC: return Builtins::kStoreIC_Miss;
- case Code::KEYED_LOAD_IC: return Builtins::kKeyedLoadIC_Miss;
- case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Miss;
- default: UNREACHABLE();
- }
- return Builtins::kLoadIC_Miss;
- }
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback);
+
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization);
+
+ static void GenerateStoreViaSetter(MacroAssembler* masm,
+ Handle<JSFunction> setter);
+
+ Handle<Code> CompileStoreViaSetter(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<JSFunction> setter);
+
+ Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
+ Handle<Name> name);
+
static Builtins::Name SlowBuiltin(Code::Kind kind) {
switch (kind) {
case Code::STORE_IC: return Builtins::kStoreIC_Slow;
@@ -943,10 +845,6 @@ class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler {
virtual void HandlerFrontendFooter(Handle<Name> name,
Label* success,
Label* miss);
- Handle<Code> GetCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name);
-
void GenerateRestoreName(MacroAssembler* masm,
Label* label,
Handle<Name> name);
@@ -960,56 +858,21 @@ class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler {
StrictModeFlag strict_mode() { return strict_mode_; }
virtual Code::ExtraICState extra_state() { return strict_mode_; }
- private:
- StrictModeFlag strict_mode_;
-};
-
-
-class StoreStubCompiler: public BaseStoreStubCompiler {
- public:
- StoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode)
- : BaseStoreStubCompiler(isolate, strict_mode, registers()) { }
-
-
- Handle<Code> CompileStoreCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback);
-
- Handle<Code> CompileStoreCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization);
-
- static void GenerateStoreViaSetter(MacroAssembler* masm,
- Handle<JSFunction> setter);
-
- Handle<Code> CompileStoreViaSetter(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Handle<JSFunction> setter);
-
- Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
- Handle<Name> name);
+ protected:
+ static Register* registers();
private:
- static Register* registers();
- virtual Code::Kind kind() { return Code::STORE_IC; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
- return code->ic_state() == MONOMORPHIC
- ? Logger::STORE_IC_TAG : Logger::STORE_POLYMORPHIC_IC_TAG;
- }
- virtual void JitEvent(Handle<Name> name, Handle<Code> code);
+ StrictModeFlag strict_mode_;
+ friend class BaseLoadStoreStubCompiler;
};
-class KeyedStoreStubCompiler: public BaseStoreStubCompiler {
+class KeyedStoreStubCompiler: public StoreStubCompiler {
public:
KeyedStoreStubCompiler(Isolate* isolate,
StrictModeFlag strict_mode,
KeyedAccessStoreMode store_mode)
- : BaseStoreStubCompiler(isolate, strict_mode, registers()),
+ : StoreStubCompiler(isolate, strict_mode, Code::KEYED_STORE_IC),
store_mode_(store_mode) { }
Handle<Code> CompileStoreElement(Handle<Map> receiver_map);
@@ -1026,24 +889,18 @@ class KeyedStoreStubCompiler: public BaseStoreStubCompiler {
virtual Code::ExtraICState extra_state() {
return Code::ComputeExtraICState(store_mode_, strict_mode());
}
+ static Register* registers();
private:
Register transition_map() {
return registers()[3];
}
- static Register* registers();
- virtual Code::Kind kind() { return Code::KEYED_STORE_IC; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
- return code->ic_state() == MONOMORPHIC
- ? Logger::KEYED_STORE_IC_TAG : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
- }
- virtual void JitEvent(Handle<Name> name, Handle<Code> code);
virtual void GenerateNameCheck(Handle<Name> name,
Register name_reg,
Label* miss);
KeyedAccessStoreMode store_mode_;
+ friend class BaseLoadStoreStubCompiler;
};
@@ -1070,7 +927,7 @@ class CallStubCompiler: public StubCompiler {
int argc,
Code::Kind kind,
Code::ExtraICState extra_state,
- InlineCacheHolderFlag cache_holder);
+ InlineCacheHolderFlag cache_holder = OWN_MAP);
Handle<Code> CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
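
For callers, the stub-cache refactor above collapses the four kind-specific monomorphic entry points into ComputeMonomorphicIC and the per-operation handler lookups into FindHandler. A hedged before/after sketch (receiver, handler, and name are assumed live handles; strict mode is presumably only meaningful for stores):

    // Before: one entry point per IC kind (removed above).
    //   ic = stub_cache->ComputeMonomorphicLoadIC(receiver, handler, name);
    // After: one entry point; the handler's flags carry the IC kind.
    Handle<Code> ic = stub_cache->ComputeMonomorphicIC(
        receiver, handler, name, kNonStrictMode);
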
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 190eb3e6ff..65d1364058 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -128,6 +128,16 @@ bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
}
+bool TypeFeedbackOracle::LoadIsPreMonomorphic(Property* expr) {
+ Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ return code->is_inline_cache_stub() && code->ic_state() == PREMONOMORPHIC;
+ }
+ return false;
+}
+
+
bool TypeFeedbackOracle::LoadIsPolymorphic(Property* expr) {
Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsCode()) {
@@ -166,6 +176,16 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
}
+bool TypeFeedbackOracle::StoreIsPreMonomorphic(TypeFeedbackId ast_id) {
+ Handle<Object> map_or_code = GetInfo(ast_id);
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ return code->ic_state() == PREMONOMORPHIC;
+ }
+ return false;
+}
+
+
bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
@@ -251,7 +271,7 @@ void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
Handle<String> name,
SmallMapList* types) {
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types);
}
@@ -261,7 +281,7 @@ void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
Handle<String> name,
SmallMapList* types) {
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::STORE_IC);
CollectReceiverTypes(expr->AssignmentFeedbackId(), name, flags, types);
}
@@ -381,20 +401,29 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
- Maybe<int>* fixed_right_arg) {
+ Maybe<int>* fixed_right_arg,
+ Token::Value operation) {
Handle<Object> object = GetInfo(id);
if (!object->IsCode()) {
- // For some binary ops we don't have ICs, e.g. Token::COMMA.
+ // For some binary ops we don't have ICs, e.g. Token::COMMA, but for the
+ // operations covered by the BinaryOpStub we should always have them.
+ ASSERT(!(operation >= BinaryOpStub::FIRST_TOKEN &&
+ operation <= BinaryOpStub::LAST_TOKEN));
*left = *right = *result = handle(Type::None(), isolate_);
return;
}
Handle<Code> code = Handle<Code>::cast(object);
ASSERT(code->is_binary_op_stub());
- int minor_key = code->stub_info();
- BinaryOpIC::StubInfoToType(minor_key, left, right, result, isolate());
- *fixed_right_arg =
- BinaryOpStub::decode_fixed_right_arg_from_minor_key(minor_key);
+ BinaryOpStub stub(code->extended_extra_ic_state());
+
+ // Sanity check.
+ ASSERT(stub.operation() == operation);
+
+ *left = stub.GetLeftType(isolate());
+ *right = stub.GetRightType(isolate());
+ *result = stub.GetResultType(isolate());
+ *fixed_right_arg = stub.fixed_right_arg();
}
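
BinaryType() above no longer hand-decodes the stub's minor key; it rebuilds a BinaryOpStub from the code object's extended extra IC state and queries it directly. A sketch of that flow as a standalone helper, assuming only the accessors exercised above:

    // Sketch: read operand types back out of a BINARY_OP_IC code object.
    static void ReadBinaryOpTypes(Isolate* isolate, Handle<Code> code,
                                  Handle<Type>* left, Handle<Type>* right,
                                  Handle<Type>* result) {
      ASSERT(code->is_binary_op_stub());
      BinaryOpStub stub(code->extended_extra_ic_state());
      *left = stub.GetLeftType(isolate);
      *right = stub.GetRightType(isolate);
      *result = stub.GetResultType(isolate);
    }
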
@@ -410,36 +439,15 @@ Handle<Type> TypeFeedbackOracle::ClauseType(TypeFeedbackId id) {
}
-TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
+Handle<Type> TypeFeedbackOracle::IncrementType(CountOperation* expr) {
Handle<Object> object = GetInfo(expr->CountBinOpFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
+ Handle<Type> unknown(Type::None(), isolate_);
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_binary_op_stub()) return unknown;
- BinaryOpIC::TypeInfo left_type, right_type, unused_result_type;
- BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
- &right_type, &unused_result_type);
- // CountOperations should always have +1 or -1 as their right input.
- ASSERT(right_type == BinaryOpIC::SMI ||
- right_type == BinaryOpIC::UNINITIALIZED);
-
- switch (left_type) {
- case BinaryOpIC::UNINITIALIZED:
- case BinaryOpIC::SMI:
- return TypeInfo::Smi();
- case BinaryOpIC::INT32:
- return TypeInfo::Integer32();
- case BinaryOpIC::NUMBER:
- return TypeInfo::Double();
- case BinaryOpIC::STRING:
- case BinaryOpIC::GENERIC:
- return unknown;
- default:
- return unknown;
- }
- UNREACHABLE();
- return unknown;
+ BinaryOpStub stub(code->extended_extra_ic_state());
+ return stub.GetLeftType(isolate());
}
@@ -634,12 +642,6 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
case Code::KEYED_LOAD_IC:
case Code::KEYED_STORE_IC:
- if (target->ic_state() == MONOMORPHIC ||
- target->ic_state() == POLYMORPHIC) {
- SetInfo(ast_id, target);
- }
- break;
-
case Code::BINARY_OP_IC:
case Code::COMPARE_IC:
case Code::TO_BOOLEAN_IC:
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 4b376c84bd..f295c06dac 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -243,9 +243,11 @@ class TypeFeedbackOracle: public ZoneObject {
bool LoadIsMonomorphicNormal(Property* expr);
bool LoadIsUninitialized(Property* expr);
+ bool LoadIsPreMonomorphic(Property* expr);
bool LoadIsPolymorphic(Property* expr);
bool StoreIsUninitialized(TypeFeedbackId ast_id);
bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
+ bool StoreIsPreMonomorphic(TypeFeedbackId ast_id);
bool StoreIsKeyedPolymorphic(TypeFeedbackId ast_id);
bool CallIsMonomorphic(Call* expr);
bool CallNewIsMonomorphic(CallNew* expr);
@@ -301,7 +303,8 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
- Maybe<int>* fixed_right_arg);
+ Maybe<int>* fixed_right_arg,
+ Token::Value operation);
void CompareType(TypeFeedbackId id,
Handle<Type>* left,
@@ -310,7 +313,7 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<Type> ClauseType(TypeFeedbackId id);
- TypeInfo IncrementType(CountOperation* expr);
+ Handle<Type> IncrementType(CountOperation* expr);
Zone* zone() const { return zone_; }
Isolate* isolate() const { return isolate_; }
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index 7bd16f670b..1e67bc30c6 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -30,7 +30,7 @@
// This file relies on the fact that the following declaration has been made
// in runtime.js:
// var $Array = global.Array;
-
+var $ArrayBuffer = global.ArrayBuffer;
// --------------- Typed Arrays ---------------------
@@ -70,15 +70,17 @@ function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
function ConstructByLength(obj, length) {
var l = ToPositiveInteger(length, "invalid_typed_array_length");
var byteLength = l * elementSize;
- var buffer = new global.ArrayBuffer(byteLength);
+ var buffer = new $ArrayBuffer(byteLength);
%TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
}
function ConstructByArrayLike(obj, arrayLike) {
var length = arrayLike.length;
- var l = ToPositiveInteger(length, "invalid_typed_array_length");
+ var l = ToPositiveInteger(length, "invalid_typed_array_length");
if(!%TypedArrayInitializeFromArrayLike(obj, arrayId, arrayLike, l)) {
for (var i = 0; i < l; i++) {
+ // It is crucial that we let any exceptions from arrayLike[i]
+ // propagate outside the function.
obj[i] = arrayLike[i];
}
}
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index 70ddccd6a7..17a19b29e4 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -128,11 +128,19 @@ int Type::LubBitset() {
Handle<v8::internal::Object> value = this->as_constant();
if (value->IsSmi()) return kSmi;
map = HeapObject::cast(*value)->map();
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
+ int32_t i;
+ uint32_t u;
+ if (value->ToInt32(&i)) return Smi::IsValid(i) ? kSmi : kOtherSigned32;
+ if (value->ToUint32(&u)) return kUnsigned32;
+ return kDouble;
+ }
if (map->instance_type() == ODDBALL_TYPE) {
if (value->IsUndefined()) return kUndefined;
if (value->IsNull()) return kNull;
if (value->IsTrue() || value->IsFalse()) return kBoolean;
- if (value->IsTheHole()) return kAny;
+ if (value->IsTheHole()) return kAny; // TODO(rossberg): kNone?
+ UNREACHABLE();
}
}
switch (map->instance_type()) {
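
The new HEAP_NUMBER_TYPE branch above picks the least upper bound by value range: Smi-representable integers map to kSmi, other signed 32-bit integers to kOtherSigned32, unsigned 32-bit values to kUnsigned32, and everything else to kDouble. A standalone approximation of that classification (names hypothetical; assumes 31-bit Smis and glosses over ToInt32's exact treatment of -0):

    #include <cmath>
    #include <stdint.h>

    enum NumberLub { kSmiLub, kOtherSigned32Lub, kUnsigned32Lub, kDoubleLub };

    static NumberLub ClassifyNumber(double v) {
      if (v >= -2147483648.0 && v <= 2147483647.0 &&
          v == std::floor(v) && !(v == 0 && std::signbit(v))) {
        int32_t i = static_cast<int32_t>(v);
        const int32_t kSmiMax = (1 << 30) - 1;   // 31-bit Smi range assumed
        const int32_t kSmiMin = -(1 << 30);
        return (i >= kSmiMin && i <= kSmiMax) ? kSmiLub : kOtherSigned32Lub;
      }
      if (v >= 0.0 && v <= 4294967295.0 && v == std::floor(v))
        return kUnsigned32Lub;
      return kDoubleLub;
    }
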
@@ -230,8 +238,9 @@ int Type::GlbBitset() {
// Check this <= that.
-bool Type::IsSlowCase(Type* that) {
+bool Type::SlowIs(Type* that) {
// Fast path for bitsets.
+ if (this->is_none()) return true;
if (that->is_bitset()) {
return (this->LubBitset() | that->as_bitset()) == that->as_bitset();
}
@@ -518,9 +527,13 @@ void Type::TypePrint(FILE* out) {
}
PrintF(out, "}");
} else if (is_constant()) {
- PrintF(out, "Constant(%p)", static_cast<void*>(*as_constant()));
+ PrintF(out, "Constant(%p : ", static_cast<void*>(*as_constant()));
+ from_bitset(LubBitset())->TypePrint(out);
+ PrintF(")");
} else if (is_class()) {
- PrintF(out, "Class(%p)", static_cast<void*>(*as_class()));
+ PrintF(out, "Class(%p < ", static_cast<void*>(*as_class()));
+ from_bitset(LubBitset())->TypePrint(out);
+ PrintF(")");
} else if (is_union()) {
PrintF(out, "{");
Handle<Unioned> unioned = as_union();
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 2810ffc8a1..5d437e26b2 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -128,6 +128,7 @@ namespace internal {
V(Receiver, kObject | kProxy) \
V(Allocated, kDouble | kName | kReceiver) \
V(Any, kOddball | kNumber | kAllocated | kInternal) \
+ V(NonNumber, kAny - kNumber) \
V(Detectable, kAllocated - kUndetectable)
#define TYPE_LIST(V) \
@@ -155,7 +156,7 @@ class Type : public Object {
static Type* Intersect(Handle<Type> type1, Handle<Type> type2);
static Type* Optional(Handle<Type> type); // type \/ Undefined
- bool Is(Type* that) { return (this == that) ? true : IsSlowCase(that); }
+ bool Is(Type* that) { return (this == that) ? true : SlowIs(that); }
bool Is(Handle<Type> that) { return this->Is(*that); }
bool Maybe(Type* that);
bool Maybe(Handle<Type> that) { return this->Maybe(*that); }
@@ -225,12 +226,13 @@ class Type : public Object {
kUnusedEOL = 0
};
+ bool is_none() { return this == None(); }
bool is_bitset() { return this->IsSmi(); }
bool is_class() { return this->IsMap(); }
bool is_constant() { return this->IsBox(); }
bool is_union() { return this->IsFixedArray(); }
- bool IsSlowCase(Type* that);
+ bool SlowIs(Type* that);
int as_bitset() { return Smi::cast(this)->value(); }
Handle<Map> as_class() { return Handle<Map>::cast(handle()); }
@@ -298,10 +300,18 @@ struct Bounds {
Handle<Type> upper;
Bounds() {}
- Bounds(Handle<Type> l, Handle<Type> u) : lower(l), upper(u) {}
- Bounds(Type* l, Type* u, Isolate* isl) : lower(l, isl), upper(u, isl) {}
- explicit Bounds(Handle<Type> t) : lower(t), upper(t) {}
- Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {}
+ Bounds(Handle<Type> l, Handle<Type> u) : lower(l), upper(u) {
+ ASSERT(lower->Is(upper));
+ }
+ Bounds(Type* l, Type* u, Isolate* isl) : lower(l, isl), upper(u, isl) {
+ ASSERT(lower->Is(upper));
+ }
+ explicit Bounds(Handle<Type> t) : lower(t), upper(t) {
+ ASSERT(lower->Is(upper));
+ }
+ Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {
+ ASSERT(lower->Is(upper));
+ }
// Unrestricted bounds.
static Bounds Unbounded(Isolate* isl) {
@@ -310,9 +320,11 @@ struct Bounds {
// Meet: both b1 and b2 are known to hold.
static Bounds Both(Bounds b1, Bounds b2, Isolate* isl) {
- return Bounds(
- handle(Type::Union(b1.lower, b2.lower), isl),
- handle(Type::Intersect(b1.upper, b2.upper), isl));
+ Handle<Type> lower(Type::Union(b1.lower, b2.lower), isl);
+ Handle<Type> upper(Type::Intersect(b1.upper, b2.upper), isl);
+ // Lower bounds are considered approximate, correct as necessary.
+ lower = handle(Type::Intersect(lower, upper), isl);
+ return Bounds(lower, upper);
}
// Join: either b1 or b2 is known to hold.
@@ -323,10 +335,14 @@ struct Bounds {
}
static Bounds NarrowLower(Bounds b, Handle<Type> t, Isolate* isl) {
+ // Lower bounds are considered approximate, correct as necessary.
+ t = handle(Type::Intersect(t, b.upper), isl);
return Bounds(handle(Type::Union(b.lower, t), isl), b.upper);
}
static Bounds NarrowUpper(Bounds b, Handle<Type> t, Isolate* isl) {
- return Bounds(b.lower, handle(Type::Intersect(b.upper, t), isl));
+ return Bounds(
+ handle(Type::Intersect(b.lower, t), isl),
+ handle(Type::Intersect(b.upper, t), isl));
}
};
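
Bounds now asserts lower->Is(upper) in every constructor, so Both(), NarrowLower() and NarrowUpper() re-establish the invariant by intersecting the lower bound with the upper. A worked sketch of why the correction in Both() is needed (b1, b2 and isolate are assumed bindings):

    // Suppose b1 = [String, String] and b2 = [Smi, Number].
    // A naive meet gives lower = Union(String, Smi) but
    // upper = Intersect(String, Number) = None, so lower is not <= upper.
    Handle<Type> lower(Type::Union(b1.lower, b2.lower), isolate);
    Handle<Type> upper(Type::Intersect(b1.upper, b2.upper), isolate);
    lower = handle(Type::Intersect(lower, upper), isolate);  // repair
    Bounds meet(lower, upper);  // the constructor ASSERT now holds
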
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index 34bb64bd7d..03c1ad16ef 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -206,6 +206,11 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
}
+void AstTyper::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
// Collect type feedback.
if (!stmt->cond()->ToBooleanIsTrue()) {
@@ -247,8 +252,8 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
RECURSE(Visit(stmt->cond()));
}
RECURSE(Visit(stmt->body()));
- store_.Forget(); // Control may transfer here via 'continue'.
if (stmt->next() != NULL) {
+ store_.Forget(); // Control may transfer here via 'continue'.
RECURSE(Visit(stmt->next()));
}
store_.Forget(); // Control may transfer here via termination or 'break'.
@@ -305,7 +310,7 @@ void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void AstTyper::VisitSharedFunctionInfoLiteral(SharedFunctionInfoLiteral* expr) {
+void AstTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
}
@@ -543,7 +548,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
Handle<Type> type, left_type, right_type;
Maybe<int> fixed_right_arg;
oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
- &left_type, &right_type, &type, &fixed_right_arg);
+ &left_type, &right_type, &type, &fixed_right_arg, expr->op());
NarrowLowerType(expr, type);
NarrowLowerType(expr->left(), left_type);
NarrowLowerType(expr->right(), right_type);
@@ -577,10 +582,15 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::BIT_AND: {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- Type* upper = Type::Union(
- expr->left()->bounds().upper, expr->right()->bounds().upper);
- if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
- NarrowType(expr, Bounds(Type::Smi(), upper, isolate_));
+ Handle<Type> upper(
+ Type::Union(
+ expr->left()->bounds().upper, expr->right()->bounds().upper),
+ isolate_);
+ if (!upper->Is(Type::Signed32()))
+ upper = handle(Type::Signed32(), isolate_);
+ Handle<Type> lower(Type::Intersect(
+ handle(Type::Smi(), isolate_), upper), isolate_);
+ NarrowType(expr, Bounds(lower, upper));
break;
}
case Token::BIT_XOR:
@@ -593,7 +603,10 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::SHR:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Smi(), Type::Unsigned32(), isolate_));
+ // TODO(rossberg): The upper bound would be Unsigned32, but since there
+ // is no 'positive Smi' type for the lower bound, we use the smallest
+ // union of Smi and Unsigned32 as upper bound instead.
+ NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
break;
case Token::ADD: {
RECURSE(Visit(expr->left()));
@@ -601,15 +614,17 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
Bounds l = expr->left()->bounds();
Bounds r = expr->right()->bounds();
Type* lower =
- l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
- Type::Smi() :
+ l.lower->Is(Type::None()) || r.lower->Is(Type::None()) ?
+ Type::None() :
l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
- Type::String() : Type::None();
+ Type::String() :
+ l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
+ Type::Smi() : Type::None();
Type* upper =
- l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
- Type::Number() :
l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
- Type::String() : Type::NumberOrString();
+ Type::String() :
+ l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
+ Type::Number() : Type::NumberOrString();
NarrowType(expr, Bounds(lower, upper, isolate_));
break;
}
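
The rewritten ADD rule above checks operand types in a deliberate order: None first (no feedback yet), then String (concatenation dominates), then Number. Restated as a hypothetical helper for the lower bound (the upper bound is analogous, with String tested before Number):

    static Type* AddLowerBound(Type* l, Type* r) {
      if (l->Is(Type::None()) || r->Is(Type::None()))
        return Type::None();      // an input is still untyped
      if (l->Is(Type::String()) || r->Is(Type::String()))
        return Type::String();    // string concatenation is certain
      if (l->Is(Type::Number()) && r->Is(Type::Number()))
        return Type::Smi();       // numeric add produces at least a Smi
      return Type::None();        // mixed operands: nothing guaranteed
    }
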
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 42a81824ba..f1dcad0bcb 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -29,7 +29,7 @@
#define V8_UNICODE_H_
#include <sys/types.h>
-#include <globals.h>
+#include "globals.h"
/**
* \file
* Definitions and convenience functions for working with unicode.
diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h
index 7ae704a26a..a93b046993 100644
--- a/deps/v8/src/unique.h
+++ b/deps/v8/src/unique.h
@@ -29,6 +29,7 @@
#define V8_HYDROGEN_UNIQUE_H_
#include "handles.h"
+#include "objects.h"
#include "utils.h"
#include "zone.h"
@@ -53,19 +54,30 @@ class UniqueSet;
template <typename T>
class Unique V8_FINAL {
public:
- // TODO(titzer): make private and introduce some builder/owner class.
+ // TODO(titzer): make private and introduce a uniqueness scope.
explicit Unique(Handle<T> handle) {
if (handle.is_null()) {
raw_address_ = NULL;
} else {
+ // This is a best-effort check to prevent comparing Unique<T>'s created
+ // in different GC eras; we require heap allocation to be disallowed at
+ // creation time.
+ // NOTE: we currently consider maps to be non-movable, so no special
+ // assurance is required for creating a Unique<Map>.
+ // TODO(titzer): other immortal immovable objects are also fine.
+ ASSERT(!AllowHeapAllocation::IsAllowed() || handle->IsMap());
raw_address_ = reinterpret_cast<Address>(*handle);
- ASSERT_NE(raw_address_, NULL);
+ ASSERT_NE(raw_address_, NULL); // Non-null should imply non-zero address.
}
handle_ = handle;
}
+ // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+ Unique(Address raw_address, Handle<T> handle)
+ : raw_address_(raw_address), handle_(handle) { }
+
// Constructor for handling automatic up casting.
- // Ex. Unique<JSFunction> can be passed when Unique<Object> is expected.
+ // E.g. Unique<JSFunction> can be passed when Unique<Object> is expected.
template <class S> Unique(Unique<S> uniq) {
#ifdef DEBUG
T* a = NULL;
@@ -74,34 +86,57 @@ class Unique V8_FINAL {
USE(a);
#endif
raw_address_ = uniq.raw_address_;
- handle_ = uniq.handle_; // Creates a new handle sharing the same location.
+ handle_ = uniq.handle_;
}
template <typename U>
- bool operator==(const Unique<U>& other) const {
+ inline bool operator==(const Unique<U>& other) const {
+ ASSERT(IsInitialized() && other.IsInitialized());
return raw_address_ == other.raw_address_;
}
template <typename U>
- bool operator!=(const Unique<U>& other) const {
+ inline bool operator!=(const Unique<U>& other) const {
+ ASSERT(IsInitialized() && other.IsInitialized());
return raw_address_ != other.raw_address_;
}
- intptr_t Hashcode() const {
+ inline intptr_t Hashcode() const {
+ ASSERT(IsInitialized());
return reinterpret_cast<intptr_t>(raw_address_);
}
- bool IsNull() {
+ inline bool IsNull() const {
+ ASSERT(IsInitialized());
return raw_address_ == NULL;
}
- // Don't do this unless you have access to the heap!
- // No, seriously! You can compare and hash and set-ify uniques that were
- // all created at the same time; please don't dereference.
- Handle<T> handle() {
+ inline bool IsKnownGlobal(void* global) const {
+ ASSERT(IsInitialized());
+ return raw_address_ == reinterpret_cast<Address>(global);
+ }
+
+ inline Handle<T> handle() const {
return handle_;
}
+ template <class S> static Unique<T> cast(Unique<S> that) {
+ return Unique<T>(that.raw_address_, Handle<T>::cast(that.handle_));
+ }
+
+ inline bool IsInitialized() const {
+ return raw_address_ != NULL || handle_.is_null();
+ }
+
+ // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+ static Unique<T> CreateUninitialized(Handle<T> handle) {
+ return Unique<T>(reinterpret_cast<Address>(NULL), handle);
+ }
+
+ static Unique<T> CreateImmovable(Handle<T> handle) {
+ return Unique<T>(reinterpret_cast<Address>(*handle), handle);
+ }
+
friend class UniqueSet<T>; // Uses internal details for speed.
template <class U>
friend class Unique; // For comparing raw_address values.
@@ -120,6 +155,7 @@ class UniqueSet V8_FINAL : public ZoneObject {
// Add a new element to this unique set. Mutates this set. O(|this|).
void Add(Unique<T> uniq, Zone* zone) {
+ ASSERT(uniq.IsInitialized());
// Keep the set sorted by the {raw_address} of the unique elements.
for (int i = 0; i < size_; i++) {
if (array_[i] == uniq) return;
@@ -137,8 +173,19 @@ class UniqueSet V8_FINAL : public ZoneObject {
array_[size_++] = uniq;
}
+ // Remove an element from this set. Mutates this set. O(|this|)
+ void Remove(Unique<T> uniq) {
+ for (int i = 0; i < size_; i++) {
+ if (array_[i] == uniq) {
+ while (++i < size_) array_[i - 1] = array_[i];
+ size_--;
+ return;
+ }
+ }
+ }
+
// Compare this set against another set. O(|this|).
- bool Equals(UniqueSet<T>* that) {
+ bool Equals(UniqueSet<T>* that) const {
if (that->size_ != this->size_) return false;
for (int i = 0; i < this->size_; i++) {
if (this->array_[i] != that->array_[i]) return false;
@@ -146,8 +193,18 @@ class UniqueSet V8_FINAL : public ZoneObject {
return true;
}
+ // Check whether this set contains the given element. O(|this|)
+ // TODO(titzer): use binary search for large sets to make this O(log|this|)
+ template <typename U>
+ bool Contains(Unique<U> elem) const {
+ for (int i = 0; i < size_; i++) {
+ if (this->array_[i] == elem) return true;
+ }
+ return false;
+ }
+
// Check if this set is a subset of the given set. O(|this| + |that|).
- bool IsSubset(UniqueSet<T>* that) {
+ bool IsSubset(UniqueSet<T>* that) const {
if (that->size_ < this->size_) return false;
int j = 0;
for (int i = 0; i < this->size_; i++) {
@@ -163,7 +220,7 @@ class UniqueSet V8_FINAL : public ZoneObject {
// Returns a new set representing the intersection of this set and the other.
// O(|this| + |that|).
- UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) {
+ UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) const {
if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>();
UniqueSet<T>* out = new(zone) UniqueSet<T>();
@@ -190,7 +247,7 @@ class UniqueSet V8_FINAL : public ZoneObject {
// Returns a new set representing the union of this set and the other.
// O(|this| + |that|).
- UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) {
+ UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) const {
if (that->size_ == 0) return this->Copy(zone);
if (this->size_ == 0) return that->Copy(zone);
@@ -222,7 +279,7 @@ class UniqueSet V8_FINAL : public ZoneObject {
}
// Makes an exact copy of this set. O(|this| + |that|).
- UniqueSet<T>* Copy(Zone* zone) {
+ UniqueSet<T>* Copy(Zone* zone) const {
UniqueSet<T>* copy = new(zone) UniqueSet<T>();
copy->size_ = this->size_;
copy->capacity_ = this->size_;
@@ -231,10 +288,19 @@ class UniqueSet V8_FINAL : public ZoneObject {
return copy;
}
- inline int size() {
+ void Clear() {
+ size_ = 0;
+ }
+
+ inline int size() const {
return size_;
}
+ inline Unique<T> at(int index) const {
+ ASSERT(index >= 0 && index < size_);
+ return array_[index];
+ }
+
private:
// These sets should be small, since operations are implemented with simple
// linear algorithms. Enforce a maximum size.
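
With Remove(), Contains() and at() added above, UniqueSet supports small-set algebra with linear scans. A hypothetical usage sketch (zone, map_a and map_b are assumed; uniques of maps are safe to create because maps are treated as immovable):

    UniqueSet<Map>* set = new(zone) UniqueSet<Map>();
    Unique<Map> a = Unique<Map>::CreateImmovable(map_a);
    Unique<Map> b = Unique<Map>::CreateImmovable(map_b);
    set->Add(a, zone);
    set->Add(b, zone);
    ASSERT(set->Contains(a));
    set->Remove(a);               // O(|set|), keeps the array sorted
    ASSERT(!set->Contains(a) && set->at(0) == b);
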
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 4a08319044..062019af46 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -419,8 +419,8 @@ class Vector {
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
- ASSERT(to <= length_);
- ASSERT(from < to);
+ SLOW_ASSERT(to <= length_);
+ SLOW_ASSERT(from < to);
ASSERT(0 <= from);
return Vector<T>(start() + from, to - from);
}
diff --git a/deps/v8/src/utils/random-number-generator.cc b/deps/v8/src/utils/random-number-generator.cc
index 1e03ee2449..fe273315a7 100644
--- a/deps/v8/src/utils/random-number-generator.cc
+++ b/deps/v8/src/utils/random-number-generator.cc
@@ -28,6 +28,7 @@
#include "utils/random-number-generator.h"
#include <cstdio>
+#include <cstdlib>
#include "flags.h"
#include "platform/mutex.h"
@@ -67,6 +68,16 @@ RandomNumberGenerator::RandomNumberGenerator() {
}
}
+#if V8_OS_CYGWIN || V8_OS_WIN
+ // Use rand_s() to gather entropy on Windows. See:
+ // https://code.google.com/p/v8/issues/detail?id=2905
+ unsigned first_half, second_half;
+ errno_t result = rand_s(&first_half);
+ ASSERT_EQ(0, result);
+ result = rand_s(&second_half);
+ ASSERT_EQ(0, result);
+ SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
+#else
// Gather entropy from /dev/urandom if available.
FILE* fp = fopen("/dev/urandom", "rb");
if (fp != NULL) {
@@ -82,10 +93,16 @@ RandomNumberGenerator::RandomNumberGenerator() {
// We cannot assume that random() or rand() were seeded
// properly, so instead of relying on random() or rand(),
// we just seed our PRNG using timing data as fallback.
+ // This is weak entropy, but it's sufficient, because
+ // it is the responsibility of the embedder to install
+ // an entropy source using v8::V8::SetEntropySource(),
+ // which provides reasonable entropy, see:
+ // https://code.google.com/p/v8/issues/detail?id=2905
int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
- seed ^= TimeTicks::HighResNow().ToInternalValue() << 16;
+ seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16;
seed ^= TimeTicks::Now().ToInternalValue() << 8;
SetSeed(seed);
+#endif // V8_OS_CYGWIN || V8_OS_WIN
}
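
On Windows and Cygwin the seed is now two rand_s() samples packed into one 64-bit value: the first sample becomes the high 32 bits, the second the low 32 bits. The packing step in isolation:

    // Sketch of the seed assembly used above.
    static int64_t CombineSeed(unsigned first_half, unsigned second_half) {
      return (static_cast<int64_t>(first_half) << 32) + second_half;
    }
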
diff --git a/deps/v8/src/utils/random-number-generator.h b/deps/v8/src/utils/random-number-generator.h
index bd7dca7e65..cc7d7395e6 100644
--- a/deps/v8/src/utils/random-number-generator.h
+++ b/deps/v8/src/utils/random-number-generator.h
@@ -42,6 +42,10 @@ namespace internal {
// If two instances of RandomNumberGenerator are created with the same seed, and
// the same sequence of method calls is made for each, they will generate and
// return identical sequences of numbers.
+// This class uses (probably) weak entropy by default, but it's sufficient,
+// because it is the responsibility of the embedder to install an entropy source
+// using v8::V8::SetEntropySource(), which provides reasonable entropy, see:
+// https://code.google.com/p/v8/issues/detail?id=2905
// This class is neither reentrant nor threadsafe.
class RandomNumberGenerator V8_FINAL {
diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc
index 6711c80203..a0c3ebd07c 100644
--- a/deps/v8/src/v8-counters.cc
+++ b/deps/v8/src/v8-counters.cc
@@ -76,6 +76,14 @@ Counters::Counters(Isolate* isolate) {
StatsCounter(isolate, "c:" "V8.SizeOf_FIXED_ARRAY-" #name);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
+
+#define SC(name) \
+ count_of_CODE_AGE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.CountOf_CODE_AGE-" #name); \
+ size_of_CODE_AGE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.SizeOf_CODE_AGE-" #name);
+ CODE_AGE_LIST_WITH_NO_AGE(SC)
+#undef SC
}
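
Each SC(name) in the new block expands to a pair of StatsCounter initializations keyed by the code age name. For a hypothetical list entry SC(NoAge), the preprocessor output reads roughly:

    count_of_CODE_AGE_NoAge_ =
        StatsCounter(isolate, "c:" "V8.CountOf_CODE_AGE-" "NoAge");
    size_of_CODE_AGE_NoAge_ =
        StatsCounter(isolate, "c:" "V8.SizeOf_CODE_AGE-" "NoAge");
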
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index ff2247cba1..476021cdbb 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -51,6 +51,7 @@ namespace internal {
HT(compile_lazy, V8.CompileLazy)
#define HISTOGRAM_PERCENTAGE_LIST(HP) \
+ /* Heap fragmentation. */ \
HP(external_fragmentation_total, \
V8.MemoryExternalFragmentationTotal) \
HP(external_fragmentation_old_pointer_space, \
@@ -67,12 +68,26 @@ namespace internal {
V8.MemoryExternalFragmentationPropertyCellSpace) \
HP(external_fragmentation_lo_space, \
V8.MemoryExternalFragmentationLoSpace) \
+ /* Percentages of heap committed to each space. */ \
+ HP(heap_fraction_new_space, \
+ V8.MemoryHeapFractionNewSpace) \
+ HP(heap_fraction_old_pointer_space, \
+ V8.MemoryHeapFractionOldPointerSpace) \
+ HP(heap_fraction_old_data_space, \
+ V8.MemoryHeapFractionOldDataSpace) \
+ HP(heap_fraction_code_space, \
+ V8.MemoryHeapFractionCodeSpace) \
HP(heap_fraction_map_space, \
V8.MemoryHeapFractionMapSpace) \
HP(heap_fraction_cell_space, \
V8.MemoryHeapFractionCellSpace) \
HP(heap_fraction_property_cell_space, \
V8.MemoryHeapFractionPropertyCellSpace) \
+ HP(heap_fraction_lo_space, \
+ V8.MemoryHeapFractionLoSpace) \
+ /* Percentage of crankshafted codegen. */ \
+ HP(codegen_fraction_crankshaft, \
+ V8.CodegenFractionCrankshaft) \
#define HISTOGRAM_MEMORY_LIST(HM) \
@@ -84,6 +99,8 @@ namespace internal {
V8.MemoryHeapSampleCellSpaceCommitted) \
HM(heap_sample_property_cell_space_committed, \
V8.MemoryHeapSamplePropertyCellSpaceCommitted) \
+ HM(heap_sample_code_space_committed, \
+ V8.MemoryHeapSampleCodeSpaceCommitted) \
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
@@ -204,7 +221,6 @@ namespace internal {
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
- SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
SC(fast_new_closure_total, V8.FastNewClosureTotal) \
SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
@@ -320,6 +336,14 @@ class Counters {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
+#define SC(name) \
+ StatsCounter* count_of_CODE_AGE_##name() \
+ { return &count_of_CODE_AGE_##name##_; } \
+ StatsCounter* size_of_CODE_AGE_##name() \
+ { return &size_of_CODE_AGE_##name##_; }
+ CODE_AGE_LIST_WITH_NO_AGE(SC)
+#undef SC
+
enum Id {
#define RATE_ID(name, caption) k_##name,
HISTOGRAM_TIMER_LIST(RATE_ID)
@@ -345,6 +369,10 @@ class Counters {
kSizeOfFIXED_ARRAY__##name,
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOfCODE_AGE__##name, \
+ kSizeOfCODE_AGE__##name,
+ CODE_AGE_LIST_WITH_NO_AGE(COUNTER_ID)
+#undef COUNTER_ID
stats_counter_count
};
@@ -390,6 +418,12 @@ class Counters {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
+#define SC(name) \
+ StatsCounter size_of_CODE_AGE_##name##_; \
+ StatsCounter count_of_CODE_AGE_##name##_;
+ CODE_AGE_LIST_WITH_NO_AGE(SC)
+#undef SC
+
friend class Isolate;
explicit Counters(Isolate* isolate);
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index e894164cd1..62330c32d4 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -227,19 +227,6 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_sweeper_threads = 0;
}
- if (FLAG_parallel_marking) {
- if (FLAG_marking_threads <= 0) {
- FLAG_marking_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_MARKING);
- }
- if (FLAG_marking_threads == 0) {
- FLAG_parallel_marking = false;
- }
- } else {
- FLAG_marking_threads = 0;
- }
-
if (FLAG_concurrent_recompilation &&
SystemThreadManager::NumberOfParallelSystemThreads(
SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 76eeac6a58..c42d5c4d35 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -32,7 +32,6 @@
// var $Number = global.Number;
// var $Function = global.Function;
// var $Array = global.Array;
-// var $NaN = 0/0;
//
// in math.js:
// var $floor = MathFloor
@@ -95,7 +94,7 @@ function SetUpLockedPrototype(constructor, fields, methods) {
}
if (fields) {
for (var i = 0; i < fields.length; i++) {
- %SetProperty(prototype, fields[i], void 0, DONT_ENUM | DONT_DELETE);
+ %SetProperty(prototype, fields[i], UNDEFINED, DONT_ENUM | DONT_DELETE);
}
}
for (var i = 0; i < methods.length; i += 2) {
@@ -148,7 +147,7 @@ function GlobalParseInt(string, radix) {
string = TO_STRING_INLINE(string);
radix = TO_INT32(radix);
if (!(radix == 0 || (2 <= radix && radix <= 36))) {
- return $NaN;
+ return NAN;
}
}
@@ -197,15 +196,16 @@ function GlobalEval(x) {
function SetUpGlobal() {
%CheckIsBootstrapping();
+ var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
+
// ECMA 262 - 15.1.1.1.
- %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "NaN", NAN, attributes);
// ECMA-262 - 15.1.1.2.
- %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "Infinity", INFINITY, attributes);
// ECMA-262 - 15.1.1.3.
- %SetProperty(global, "undefined", void 0,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "undefined", UNDEFINED, attributes);
// Set up non-enumerable function on the global object.
InstallFunctions(global, DONT_ENUM, $Array(
@@ -475,12 +475,12 @@ function ToPropertyDescriptor(obj) {
function ToCompletePropertyDescriptor(obj) {
var desc = ToPropertyDescriptor(obj);
if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
- if (!desc.hasValue()) desc.setValue(void 0);
+ if (!desc.hasValue()) desc.setValue(UNDEFINED);
if (!desc.hasWritable()) desc.setWritable(false);
} else {
// Is accessor descriptor.
- if (!desc.hasGetter()) desc.setGet(void 0);
- if (!desc.hasSetter()) desc.setSet(void 0);
+ if (!desc.hasGetter()) desc.setGet(UNDEFINED);
+ if (!desc.hasSetter()) desc.setSet(UNDEFINED);
}
if (!desc.hasEnumerable()) desc.setEnumerable(false);
if (!desc.hasConfigurable()) desc.setConfigurable(false);
@@ -491,7 +491,7 @@ function ToCompletePropertyDescriptor(obj) {
function PropertyDescriptor() {
// Initialize here so they are all in-object and have the same map.
// Default values from ES5 8.6.1.
- this.value_ = void 0;
+ this.value_ = UNDEFINED;
this.hasValue_ = false;
this.writable_ = false;
this.hasWritable_ = false;
@@ -499,9 +499,9 @@ function PropertyDescriptor() {
this.hasEnumerable_ = false;
this.configurable_ = false;
this.hasConfigurable_ = false;
- this.get_ = void 0;
+ this.get_ = UNDEFINED;
this.hasGetter_ = false;
- this.set_ = void 0;
+ this.set_ = UNDEFINED;
this.hasSetter_ = false;
}
@@ -593,7 +593,7 @@ function ConvertDescriptorArrayToDescriptor(desc_array) {
}
if (IS_UNDEFINED(desc_array)) {
- return void 0;
+ return UNDEFINED;
}
var desc = new PropertyDescriptor();
@@ -647,10 +647,11 @@ function GetOwnProperty(obj, v) {
var p = ToName(v);
if (%IsJSProxy(obj)) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(v)) return void 0;
+ if (IS_SYMBOL(v)) return UNDEFINED;
var handler = %GetHandler(obj);
- var descriptor = CallTrap1(handler, "getOwnPropertyDescriptor", void 0, p);
+ var descriptor = CallTrap1(
+ handler, "getOwnPropertyDescriptor", UNDEFINED, p);
if (IS_UNDEFINED(descriptor)) return descriptor;
var desc = ToCompletePropertyDescriptor(descriptor);
if (!desc.isConfigurable()) {
@@ -666,7 +667,7 @@ function GetOwnProperty(obj, v) {
var props = %GetOwnProperty(ToObject(obj), p);
// A false value here means that access checks failed.
- if (props === false) return void 0;
+ if (props === false) return UNDEFINED;
return ConvertDescriptorArrayToDescriptor(props);
}
@@ -693,7 +694,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
if (IS_SYMBOL(p)) return false;
var handler = %GetHandler(obj);
- var result = CallTrap2(handler, "defineProperty", void 0, p, attributes);
+ var result = CallTrap2(handler, "defineProperty", UNDEFINED, p, attributes);
if (!ToBoolean(result)) {
if (should_throw) {
throw MakeTypeError("handler_returned_false",
@@ -710,7 +711,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
function DefineObjectProperty(obj, p, desc, should_throw) {
var current_or_access = %GetOwnProperty(ToObject(obj), ToName(p));
// A false value here means that access checks failed.
- if (current_or_access === false) return void 0;
+ if (current_or_access === false) return UNDEFINED;
var current = ConvertDescriptorArrayToDescriptor(current_or_access);
var extensible = %IsExtensible(ToObject(obj));
@@ -841,7 +842,7 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
flag |= READ_ONLY;
}
- var value = void 0; // Default value is undefined.
+ var value = UNDEFINED; // Default value is undefined.
if (desc.hasValue()) {
value = desc.getValue();
} else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) {
@@ -920,7 +921,7 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
// For the time being, we need a hack to prevent Object.observe from
// generating two change records.
obj.length = new_length;
- desc.value_ = void 0;
+ desc.value_ = UNDEFINED;
desc.hasValue_ = false;
threw = !DefineObjectProperty(obj, "length", desc, should_throw) || threw;
if (emit_splice) {
@@ -1045,7 +1046,7 @@ function ObjectGetOwnPropertyNames(obj) {
// Special handling for proxies.
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "getOwnPropertyNames", void 0);
+ var names = CallTrap0(handler, "getOwnPropertyNames", UNDEFINED);
return ToNameArray(names, "getOwnPropertyNames", false);
}
@@ -1194,7 +1195,7 @@ function ObjectDefineProperties(obj, properties) {
// Harmony proxies.
function ProxyFix(obj) {
var handler = %GetHandler(obj);
- var props = CallTrap0(handler, "fix", void 0);
+ var props = CallTrap0(handler, "fix", UNDEFINED);
if (IS_UNDEFINED(props)) {
throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
}
@@ -1560,8 +1561,8 @@ function NumberToFixed(fractionDigits) {
}
if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
+ if (x == INFINITY) return "Infinity";
+ if (x == -INFINITY) return "-Infinity";
return %NumberToFixed(x, f);
}
@@ -1578,11 +1579,11 @@ function NumberToExponential(fractionDigits) {
// Get the value of this number in case it's an object.
x = %_ValueOf(this);
}
- var f = IS_UNDEFINED(fractionDigits) ? void 0 : TO_INTEGER(fractionDigits);
+ var f = IS_UNDEFINED(fractionDigits) ? UNDEFINED : TO_INTEGER(fractionDigits);
if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
+ if (x == INFINITY) return "Infinity";
+ if (x == -INFINITY) return "-Infinity";
if (IS_UNDEFINED(f)) {
f = -1; // Signal for runtime function that f is not defined.
@@ -1608,8 +1609,8 @@ function NumberToPrecision(precision) {
var p = TO_INTEGER(precision);
if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
+ if (x == INFINITY) return "Infinity";
+ if (x == -INFINITY) return "-Infinity";
if (p < 1 || p > 21) {
throw new $RangeError("toPrecision() argument must be between 1 and 21");
@@ -1654,18 +1655,18 @@ function SetUpNumber() {
DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 section 15.7.3.3.
- %SetProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($Number, "NaN", NAN, DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 section 15.7.3.4.
%SetProperty($Number,
"NEGATIVE_INFINITY",
- -1/0,
+ -INFINITY,
DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 section 15.7.3.5.
%SetProperty($Number,
"POSITIVE_INFINITY",
- 1/0,
+ INFINITY,
DONT_ENUM | DONT_DELETE | READ_ONLY);
%ToFastProperties($Number);
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 33b620d8ea..cc4f43965f 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -42,11 +42,6 @@ namespace v8 {
bool Locker::active_ = false;
-Locker::Locker() {
- Initialize(i::Isolate::GetDefaultIsolateForLocking());
-}
-
-
// Once the Locker is initialized, the current thread will be guaranteed to have
// the lock for a given isolate.
void Locker::Initialize(v8::Isolate* isolate) {
@@ -116,11 +111,6 @@ Locker::~Locker() {
}
-Unlocker::Unlocker() {
- Initialize(i::Isolate::GetDefaultIsolateForLocking());
-}
-
-
void Unlocker::Initialize(v8::Isolate* isolate) {
ASSERT(isolate != NULL);
isolate_ = reinterpret_cast<i::Isolate*>(isolate);
@@ -143,14 +133,15 @@ Unlocker::~Unlocker() {
}
-void Locker::StartPreemption(int every_n_ms) {
+void Locker::StartPreemption(v8::Isolate* isolate, int every_n_ms) {
v8::internal::ContextSwitcher::StartPreemption(
- i::Isolate::Current(), every_n_ms);
+ reinterpret_cast<i::Isolate*>(isolate), every_n_ms);
}
-void Locker::StopPreemption() {
- v8::internal::ContextSwitcher::StopPreemption(i::Isolate::Current());
+void Locker::StopPreemption(v8::Isolate* isolate) {
+ v8::internal::ContextSwitcher::StopPreemption(
+ reinterpret_cast<i::Isolate*>(isolate));
}
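
StartPreemption and StopPreemption now take an explicit v8::Isolate*, matching the removal of the default-isolate Locker and Unlocker constructors earlier in this file. A hedged embedder-side sketch:

    // Hypothetical embedder code; the isolate comes from elsewhere.
    v8::Isolate* isolate = v8::Isolate::GetCurrent();
    {
      v8::Locker locker(isolate);
      v8::Locker::StartPreemption(isolate, 100);  // preempt every 100 ms
      // ... run JavaScript on cooperating threads ...
      v8::Locker::StopPreemption(isolate);
    }
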
@@ -481,7 +472,6 @@ void ContextSwitcher::Run() {
// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
- ASSERT(Locker::IsLocked(i::Isolate::GetDefaultIsolateForLocking()));
// There is currently no accounting being done for this. But could be in the
// future, which is why we leave this in.
}
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index fd3f4a5095..02e57ebe72 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -194,61 +194,6 @@ inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
}
-// Copies data from |src| to |dst|. No restrictions.
-template <typename T>
-inline void MoveBytes(T* dst, const T* src, size_t num_bytes) {
- STATIC_ASSERT(sizeof(T) == 1);
- switch (num_bytes) {
- case 0: return;
- case 1:
- *dst = *src;
- return;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- case 2:
- *reinterpret_cast<uint16_t*>(dst) = *reinterpret_cast<const uint16_t*>(src);
- return;
- case 3: {
- uint16_t part1 = *reinterpret_cast<const uint16_t*>(src);
- byte part2 = *(src + 2);
- *reinterpret_cast<uint16_t*>(dst) = part1;
- *(dst + 2) = part2;
- return;
- }
- case 4:
- *reinterpret_cast<uint32_t*>(dst) = *reinterpret_cast<const uint32_t*>(src);
- return;
- case 5:
- case 6:
- case 7:
- case 8: {
- uint32_t part1 = *reinterpret_cast<const uint32_t*>(src);
- uint32_t part2 = *reinterpret_cast<const uint32_t*>(src + num_bytes - 4);
- *reinterpret_cast<uint32_t*>(dst) = part1;
- *reinterpret_cast<uint32_t*>(dst + num_bytes - 4) = part2;
- return;
- }
- case 9:
- case 10:
- case 11:
- case 12:
- case 13:
- case 14:
- case 15:
- case 16: {
- double part1 = *reinterpret_cast<const double*>(src);
- double part2 = *reinterpret_cast<const double*>(src + num_bytes - 8);
- *reinterpret_cast<double*>(dst) = part1;
- *reinterpret_cast<double*>(dst + num_bytes - 8) = part2;
- return;
- }
-#endif
- default:
- OS::MemMove(dst, src, num_bytes);
- return;
- }
-}
-
-
template <typename T, typename U>
inline void MemsetPointer(T** dest, U* value, int counter) {
#ifdef DEBUG
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index a65b54f674..6d4efa2261 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 21
-#define BUILD_NUMBER 18
-#define PATCH_LEVEL 3
+#define MINOR_VERSION 22
+#define BUILD_NUMBER 24
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/win32-math.cc b/deps/v8/src/win32-math.cc
index 88fa3a684b..8f6d077431 100644
--- a/deps/v8/src/win32-math.cc
+++ b/deps/v8/src/win32-math.cc
@@ -29,7 +29,7 @@
// refer to The Open Group Base Specification for specification of the correct
// semantics for these functions.
// (http://www.opengroup.org/onlinepubs/000095399/)
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && (_MSC_VER < 1800)
#include "win32-headers.h"
#include <limits.h> // Required for INT_MAX etc.
diff --git a/deps/v8/src/win32-math.h b/deps/v8/src/win32-math.h
index 0397c7e14e..fd9312b0f5 100644
--- a/deps/v8/src/win32-math.h
+++ b/deps/v8/src/win32-math.h
@@ -37,6 +37,8 @@
#error Wrong environment, expected MSVC.
#endif // _MSC_VER
+// MSVC 2013+ provides implementations of all standard math functions.
+#if (_MSC_VER < 1800)
enum {
FP_NAN,
FP_INFINITE,
@@ -58,4 +60,6 @@ int signbit(double x);
} // namespace std
+#endif // _MSC_VER < 1800
+
#endif // V8_WIN32_MATH_H_
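
For porters: _MSC_VER 1800 is Visual Studio 2013, the first MSVC whose CRT
ships the C99 classification functions, so the shims are compiled out there.
A condensed sketch of the guard the two files now share (declarations
abbreviated; the real header declares the full C99 set):

    #if defined(_MSC_VER) && (_MSC_VER < 1800)
    namespace std {
    int isnan(double x);     // implemented in win32-math.cc
    int signbit(double x);
    }  // namespace std
    #endif  // _MSC_VER < 1800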
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 07d07033e9..afac886c73 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -43,6 +43,7 @@ namespace internal {
static const byte kCallOpcode = 0xE8;
+static const int kNoCodeAgeSequenceLength = 6;
void Assembler::emitl(uint32_t x) {
@@ -61,11 +62,8 @@ void Assembler::emitp(void* x, RelocInfo::Mode rmode) {
}
-void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
+void Assembler::emitq(uint64_t x) {
Memory::uint64_at(pc_) = x;
- if (!RelocInfo::IsNone(rmode)) {
- RecordRelocInfo(rmode, x);
- }
pc_ += sizeof(uint64_t);
}
@@ -79,7 +77,8 @@ void Assembler::emitw(uint16_t x) {
void Assembler::emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+ rmode == RelocInfo::CODE_AGE_SEQUENCE);
if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
} else {
@@ -392,6 +391,13 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return origin->code_target_object_handle_at(pc_ + 1);
+}
+
+
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 41bf297b38..dcb9fa5621 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -44,7 +44,7 @@ bool CpuFeatures::initialized_ = false;
#endif
uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
-
+uint64_t CpuFeatures::cross_compile_ = 0;
ExternalReference ExternalReference::cpu_features() {
ASSERT(CpuFeatures::initialized_);
@@ -110,8 +110,8 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->movq(r10, target, RelocInfo::NONE64);
- patcher.masm()->call(r10);
+ patcher.masm()->movq(kScratchRegister, target, RelocInfo::NONE64);
+ patcher.masm()->call(kScratchRegister);
// Check that the size of the code generated is as expected.
ASSERT_EQ(Assembler::kCallSequenceLength,
@@ -1465,26 +1465,24 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
// Non-relocatable values might not need a 64-bit representation.
- if (RelocInfo::IsNone(rmode)) {
- if (is_uint32(value)) {
- movl(dst, Immediate(static_cast<int32_t>(value)));
- return;
- } else if (is_int32(value)) {
- movq(dst, Immediate(static_cast<int32_t>(value)));
- return;
- }
+ ASSERT(RelocInfo::IsNone(rmode));
+ if (is_uint32(value)) {
+ movl(dst, Immediate(static_cast<int32_t>(value)));
+ } else if (is_int32(value)) {
+ movq(dst, Immediate(static_cast<int32_t>(value)));
+ } else {
    // Value cannot be represented in 32 bits, so emit a full 64-bit
    // immediate value.
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst);
+ emit(0xB8 | dst.low_bits());
+ emitq(value);
}
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(value, rmode);
}
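
The movq rewrite is a pure code-size optimization: only values that genuinely
need 64 bits get the ten-byte imm64 encoding. A standalone restatement of the
selection logic (our helper, mirroring the is_uint32/is_int32 checks above):

    #include <cstdint>

    enum MoveKind { kMovl, kMovqImm32, kMovqImm64 };

    // movl zero-extends into the full 64-bit register, so unsigned 32-bit
    // values take the shortest form; signed 32-bit values fit the
    // sign-extended imm32 form; everything else needs the full imm64.
    MoveKind ClassifyImmediate(int64_t value) {
      if (value >= 0 && value <= INT64_C(0xFFFFFFFF)) return kMovl;
      if (value >= INT32_MIN && value <= INT32_MAX) return kMovqImm32;
      return kMovqImm64;
    }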
void Assembler::movq(Register dst, ExternalReference ref) {
- int64_t value = reinterpret_cast<int64_t>(ref.address());
+ Address value = reinterpret_cast<Address>(ref.address());
movq(dst, value, RelocInfo::EXTERNAL_REFERENCE);
}
@@ -1899,7 +1897,7 @@ void Assembler::shrd(Register dst, Register src) {
}
-void Assembler::xchg(Register dst, Register src) {
+void Assembler::xchgq(Register dst, Register src) {
EnsureSpace ensure_space(this);
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
@@ -1917,6 +1915,24 @@ void Assembler::xchg(Register dst, Register src) {
}
+void Assembler::xchgl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
+ Register other = src.is(rax) ? dst : src;
+ emit_optional_rex_32(other);
+ emit(0x90 | other.low_bits());
+ } else if (dst.low_bits() == 4) {
+ emit_optional_rex_32(dst, src);
+ emit(0x87);
+ emit_modrm(dst, src);
+ } else {
+ emit_optional_rex_32(src, dst);
+ emit(0x87);
+ emit_modrm(src, dst);
+ }
+}
+
+
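Encoding note on the xchg pair (opcodes from the Intel manual, not this
patch): exchanges with the accumulator have a one-byte form, which is why
both emitters special-case rax.

    // xchg rax/eax, reg : 0x90 | reg.low_bits()  (plus REX as needed;
    //                      bare 0x90 is the canonical NOP)
    // xchg reg,  reg    : 0x87 /r                (two bytes plus REX)
    // xchgq emits REX.W for the 64-bit width; xchgl omits it.
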
void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
emit(0x48); // REX.W
@@ -2035,6 +2051,14 @@ void Assembler::testl(const Operand& op, Immediate mask) {
}
+void Assembler::testl(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(reg, op);
+ emit(0x85);
+ emit_operand(reg, op);
+}
+
+
void Assembler::testq(const Operand& op, Register reg) {
EnsureSpace ensure_space(this);
emit_rex_64(reg, op);
@@ -2058,6 +2082,10 @@ void Assembler::testq(Register dst, Register src) {
void Assembler::testq(Register dst, Immediate mask) {
+ if (is_uint8(mask.value_)) {
+ testb(dst, mask);
+ return;
+ }
EnsureSpace ensure_space(this);
if (dst.is(rax)) {
emit_rex_64();
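
Routing byte-sized masks through testb trades the REX.W-plus-imm32 encoding
of testq for a one-byte immediate. Note this assumes callers only consume the
zero flag: the byte form derives SF from bit 7 rather than bit 63, so the two
are not flag-for-flag identical (our caveat, not stated in the patch).

    // is_uint8(mask) <=> 0 <= mask.value_ <= 0xFF, the range testb encodes.
    bool FitsTestb(int64_t mask) { return mask >= 0 && mask <= 0xFF; }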
@@ -2448,6 +2476,17 @@ void Assembler::emit_farith(int b1, int b2, int i) {
}
+// SSE operations.
+
+void Assembler::andps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
// SSE 2 operations.
void Assembler::movd(XMMRegister dst, Register src) {
@@ -2550,15 +2589,15 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(IsEnabled(SSE4_1));
ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
- emit_optional_rex_32(dst, src);
+ emit_optional_rex_32(src, dst);
emit(0x0F);
emit(0x3A);
emit(0x17);
- emit_sse_operand(dst, src);
+ emit_sse_operand(src, dst);
emit(imm8);
}
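
The operand swap in extractps is a genuine encoding fix, not a cleanup:
EXTRACTPS stores the XMM source in ModRM.reg and the general-purpose
destination in ModRM.rm, so the REX prefix and the ModRM byte must both be
emitted with src in the reg role.

    // EXTRACTPS r/m32, xmm, imm8  =>  66 0F 3A 17 /r ib
    // ModRM.reg = XMM source, ModRM.rm = GP destination.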
@@ -3000,8 +3039,8 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(!RelocInfo::IsNone(rmode));
- // Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ // Don't record external references unless the heap will be serialized.
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
@@ -3010,6 +3049,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!Serializer::enabled() && !emit_debug_code()) {
return;
}
+ } else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
+    // Don't record pseudo relocation info for code age sequence mode.
+ return;
}
RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index f2e37fe863..508c622112 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -471,26 +471,45 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
+ if (Check(f, cross_compile_)) return true;
ASSERT(initialized_);
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
if (f == SAHF && !FLAG_enable_sahf) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, supported_);
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- return (found_by_runtime_probing_only_ &
- (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
static bool IsSafeForSnapshot(CpuFeature f) {
- return (IsSupported(f) &&
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ uint64_t mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
+
private:
+ static bool Check(CpuFeature f, uint64_t set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static uint64_t flag2set(CpuFeature f) {
+ return static_cast<uint64_t>(1) << f;
+ }
+
// Safe defaults include CMOV for X64. It is always available, if
// anyone checks, but they shouldn't need to check.
// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
@@ -503,7 +522,10 @@ class CpuFeatures : public AllStatic {
static uint64_t supported_;
static uint64_t found_by_runtime_probing_only_;
+ static uint64_t cross_compile_;
+
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
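
cross_compile_ threads a third feature set through the existing bitmask
scheme. A self-contained sketch of the pattern (the enum values here are
illustrative; the real CpuFeature enum lives elsewhere in the tree):

    #include <cstdint>

    enum CpuFeature { SSE3 = 0, SSE4_1 = 1, CMOV = 2, SAHF = 3 };

    uint64_t flag2set(CpuFeature f) { return static_cast<uint64_t>(1) << f; }
    bool Check(CpuFeature f, uint64_t set) { return (set & flag2set(f)) != 0; }

    // VerifyCrossCompiling(f) then reads as: either no cross-compile target
    // is configured (cross_compile_ == 0), or feature f is in the target set.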
@@ -701,7 +723,6 @@ class Assembler : public AssemblerBase {
// All 64-bit immediates must have a relocation mode.
void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
- void movq(Register dst, const char* s, RelocInfo::Mode rmode);
// Moves the address of the external reference into the register.
void movq(Register dst, ExternalReference ext);
void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
@@ -734,7 +755,8 @@ class Assembler : public AssemblerBase {
void cmovl(Condition cc, Register dst, const Operand& src);
// Exchange two registers
- void xchg(Register dst, Register src);
+ void xchgq(Register dst, Register src);
+ void xchgl(Register dst, Register src);
// Arithmetics
void addl(Register dst, Register src) {
@@ -969,6 +991,10 @@ class Assembler : public AssemblerBase {
arithmetic_op(0x09, src, dst);
}
+ void orl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x09, src, dst);
+ }
+
void or_(Register dst, Immediate src) {
immediate_arithmetic_op(0x1, dst, src);
}
@@ -994,6 +1020,10 @@ class Assembler : public AssemblerBase {
shift(dst, imm8, 0x0);
}
+ void roll(Register dst, Immediate imm8) {
+ shift_32(dst, imm8, 0x0);
+ }
+
void rcr(Register dst, Immediate imm8) {
shift(dst, imm8, 0x3);
}
@@ -1101,6 +1131,10 @@ class Assembler : public AssemblerBase {
arithmetic_op_32(0x2B, dst, src);
}
+ void subl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x29, src, dst);
+ }
+
void subl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x5, dst, src);
}
@@ -1119,6 +1153,7 @@ class Assembler : public AssemblerBase {
void testb(const Operand& op, Register reg);
void testl(Register dst, Register src);
void testl(Register reg, Immediate mask);
+ void testl(const Operand& op, Register reg);
void testl(const Operand& op, Immediate mask);
void testq(const Operand& op, Register reg);
void testq(Register dst, Register src);
@@ -1144,6 +1179,10 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_32(0x6, dst, src);
}
+ void xorl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x31, src, dst);
+ }
+
void xorl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x6, dst, src);
}
@@ -1307,13 +1346,26 @@ class Assembler : public AssemblerBase {
void sahf();
+ // SSE instructions
+ void movaps(XMMRegister dst, XMMRegister src);
+ void movss(XMMRegister dst, const Operand& src);
+ void movss(const Operand& dst, XMMRegister src);
+
+ void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, XMMRegister src);
+ void cvtlsi2ss(XMMRegister dst, Register src);
+
+ void xorps(XMMRegister dst, XMMRegister src);
+ void andps(XMMRegister dst, XMMRegister src);
+
+ void movmskps(Register dst, XMMRegister src);
+
// SSE2 instructions
void movd(XMMRegister dst, Register src);
void movd(Register dst, XMMRegister src);
void movq(XMMRegister dst, Register src);
void movq(Register dst, XMMRegister src);
void movq(XMMRegister dst, XMMRegister src);
- void extractps(Register dst, XMMRegister src, byte imm8);
// Don't use this unless it's important to keep the
// top half of the destination register unchanged.
@@ -1331,13 +1383,7 @@ class Assembler : public AssemblerBase {
void movdqu(XMMRegister dst, const Operand& src);
void movapd(XMMRegister dst, XMMRegister src);
- void movaps(XMMRegister dst, XMMRegister src);
-
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
- void cvttss2si(Register dst, const Operand& src);
- void cvttss2si(Register dst, XMMRegister src);
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
@@ -1347,7 +1393,6 @@ class Assembler : public AssemblerBase {
void cvtqsi2sd(XMMRegister dst, const Operand& src);
void cvtqsi2sd(XMMRegister dst, Register src);
- void cvtlsi2ss(XMMRegister dst, Register src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtss2sd(XMMRegister dst, const Operand& src);
@@ -1366,11 +1411,16 @@ class Assembler : public AssemblerBase {
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
+ void cmpltsd(XMMRegister dst, XMMRegister src);
+
+ void movmskpd(Register dst, XMMRegister src);
+
+ // SSE 4.1 instruction
+ void extractps(Register dst, XMMRegister src, byte imm8);
enum RoundingMode {
kRoundToNearest = 0x0,
@@ -1381,17 +1431,6 @@ class Assembler : public AssemblerBase {
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
- void movmskpd(Register dst, XMMRegister src);
- void movmskps(Register dst, XMMRegister src);
-
- void cmpltsd(XMMRegister dst, XMMRegister src);
-
- // The first argument is the reg field, the second argument is the r/m field.
- void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(XMMRegister dst, Register src);
- void emit_sse_operand(Register dst, XMMRegister src);
-
// Debugging
void Print();
@@ -1452,7 +1491,7 @@ class Assembler : public AssemblerBase {
void emit(byte x) { *pc_++ = x; }
inline void emitl(uint32_t x);
inline void emitp(void* x, RelocInfo::Mode rmode);
- inline void emitq(uint64_t x, RelocInfo::Mode rmode);
+ inline void emitq(uint64_t x);
inline void emitw(uint16_t x);
inline void emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode,
@@ -1572,6 +1611,12 @@ class Assembler : public AssemblerBase {
// Emit the code-object-relative offset of the label's position
inline void emit_code_relative_offset(Label* label);
+ // The first argument is the reg field, the second argument is the r/m field.
+ void emit_sse_operand(XMMRegister dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister reg, const Operand& adr);
+ void emit_sse_operand(XMMRegister dst, Register src);
+ void emit_sse_operand(Register dst, XMMRegister src);
+
// Emit machine code for one of the operations ADD, ADC, SUB, SBC,
// AND, OR, XOR, or CMP. The encodings of these operations are all
// similar, differing just in the opcode or in the reg field of the
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 81721c25e1..f65b25c652 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -600,6 +600,8 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the stub returns.
__ subq(Operand(rsp, 0), Immediate(5));
__ Pushad();
+ __ movq(arg_reg_2,
+ ExternalReference::isolate_address(masm->isolate()));
__ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
@@ -625,6 +627,42 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the
+  // fact that make_code_young doesn't do any garbage collection, which allows
+  // us to save/restore the registers without worrying about which of them
+  // contain pointers.
+ __ Pushad();
+ __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+ __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 1);
+ }
+ __ Popad();
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ PopReturnAddressTo(kScratchRegister);
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ __ push(rdi); // Callee's JS Function.
+ __ PushReturnAddressFrom(kScratchRegister);
+
+ // Jump to point after the code-age stub.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
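Both code-age builtins call into C++ with the address just past the call
sequence in arg_reg_1 and the isolate in arg_reg_2. The C-side signature is
not part of this hunk; inferred from the register setup it is presumably
along these lines (an inference, not a quote from the patch):

    // See codegen.cc for the real declaration.
    extern "C" void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
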
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
// Enter an internal frame.
{
@@ -658,17 +696,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
}
// Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(r10, Operand(rsp, kPCOnStackSize));
+ __ SmiToInteger32(kScratchRegister, Operand(rsp, kPCOnStackSize));
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movq(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
- __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
+ __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -692,21 +730,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ Pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ Popad();
- __ ret(0);
-}
-
-
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Stack Layout:
// rsp[0] : Return address
@@ -894,9 +917,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// rbp[16] : function arguments
// rbp[24] : receiver
// rbp[32] : function
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
+ static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
+ static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
+ static const int kFunctionOffset = kReceiverOffset + kPointerSize;
__ push(Operand(rbp, kFunctionOffset));
__ push(Operand(rbp, kArgumentsOffset));
@@ -1140,13 +1163,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- rax, // Input.
- rbx, // Result.
- rcx, // Scratch 1.
- rdx, // Scratch 2.
- &not_cached);
+ __ LookupNumberStringCache(rax, // Input.
+ rbx, // Result.
+ rcx, // Scratch 1.
+ rdx, // Scratch 2.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
@@ -1401,6 +1422,23 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+  // We check the stack limit as an indicator that recompilation might be done.
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ ret(0);
+}
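
The builtin compresses to a few lines of pseudocode; the stack-limit root
doubles as the "interrupt pending" signal (our paraphrase):

    // if (rsp >= stack_limit) return;   // nothing pending, resume normally
    // Runtime::kStackGuard();           // let the guard service the interrupt
    // tail-jump to OnStackReplacement   // then retry the OSR entry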
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 51e1a5395c..b3ab8c1e75 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -60,6 +60,17 @@ void ToNumberStub::InitializeInterfaceDescriptor(
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -78,7 +89,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -145,6 +156,18 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
}
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
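This descriptor is the visible half of a much larger change: the hand-written
BinaryOpStub bodies deleted further down are replaced by a generated stub
that reports misses through BinaryOpIC_Miss. A rough statement of the miss
contract implied here (our reading, not quoted from the patch):

    // On a type-state mismatch the stub tail-calls IC::kBinaryOpIC_Miss,
    // which recomputes the operand states, installs a better stub in the
    // IC, and re-executes the operation; rdx/rax stay the left/right inputs.
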
static void InitializeArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
@@ -157,7 +180,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
    // The stack parameter count covers the constructor pointer and the
    // single argument.
- descriptor->stack_parameter_count_ = &rax;
+ descriptor->stack_parameter_count_ = rax;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
@@ -179,7 +202,7 @@ static void InitializeInternalArrayConstructorDescriptor(
if (constant_stack_parameter_count != 0) {
    // The stack parameter count covers the constructor pointer and the
    // single argument.
- descriptor->stack_parameter_count_ = &rax;
+ descriptor->stack_parameter_count_ = rax;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
@@ -437,35 +460,8 @@ class FloatingPointHelper : public AllStatic {
// If the operands are not both numbers, jump to not_numbers.
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
// NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
static void LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
-
- // Tries to convert two values to smis losslessly.
- // This fails if either argument is not a Smi nor a HeapNumber,
- // or if it's a HeapNumber with a value that can't be converted
- // losslessly to a Smi. In that case, control transitions to the
- // on_not_smis label.
- // On success, either control goes to the on_success label (if one is
- // provided), or it falls through at the end of the code (if on_success
- // is NULL).
- // On success, both first and second holds Smi tagged values.
- // One of first or second must be non-Smi when entering.
- static void NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined);
};
@@ -553,569 +549,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void BinaryOpStub::Initialize() {}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ PopReturnAddressTo(rcx);
- __ push(rdx);
- __ push(rax);
- // Left and right arguments are now on top.
- __ Push(Smi::FromInt(MinorKey()));
-
- __ PushReturnAddressFrom(rcx);
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
-
- // Arguments to BinaryOpStub are in rdx and rax.
- const Register left = rdx;
- const Register right = rax;
-
- // We only generate heapnumber answers for overflowing calculations
- // for the four basic arithmetic operations and logical right shift by 0.
- bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
- (op == Token::ADD || op == Token::SUB ||
- op == Token::MUL || op == Token::DIV || op == Token::SHR);
-
- // Smi check of both operands. If op is BIT_OR, the check is delayed
- // until after the OR operation.
- Label not_smis;
- Label use_fp_on_smis;
- Label fail;
-
- if (op != Token::BIT_OR) {
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
-
- Label smi_values;
- __ bind(&smi_values);
- // Perform the operation.
- Comment perform_smi(masm, "-- Perform smi operation");
- switch (op) {
- case Token::ADD:
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
-
- case Token::SUB:
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
-
- case Token::DIV:
- // SmiDiv will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::MOD:
- // SmiMod will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::BIT_OR: {
- ASSERT(right.is(rax));
- __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative.
- break;
- }
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
-
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in rax. Some operations have registers pushed.
- __ ret(0);
-
- if (use_fp_on_smis.is_linked()) {
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- __ bind(&use_fp_on_smis);
- if (op == Token::DIV || op == Token::MOD) {
- // Restore left and right to rdx and rax.
- __ movq(rdx, rcx);
- __ movq(rax, rbx);
- }
-
- if (generate_inline_heapnumber_results) {
- __ AllocateHeapNumber(rcx, rbx, slow);
- Comment perform_float(masm, "-- Perform float operation on smis");
- if (op == Token::SHR) {
- __ SmiToInteger32(left, left);
- __ cvtqsi2sd(xmm0, left);
- } else {
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- }
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ jmp(&fail);
- }
- }
-
- // 7. Non-smi operands reach the end of the code generated by
- // GenerateSmiCode, and fall through to subsequent code,
- // with the operands in rdx and rax.
- // But first we check if non-smi values are HeapNumbers holding
- // values that could be smi.
- __ bind(&not_smis);
- Comment done_comment(masm, "-- Enter non-smi code");
- FloatingPointHelper::ConvertUndefined convert_undefined =
- FloatingPointHelper::BAILOUT_ON_UNDEFINED;
- // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
- if (op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR ||
- op == Token::SAR ||
- op == Token::SHL ||
- op == Token::SHR) {
- convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
- }
- FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
- &smi_values, &fail, convert_undefined);
- __ jmp(&smi_values);
- __ bind(&fail);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure,
- Token::Value op,
- OverwriteMode mode) {
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
-
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, allocation_failure, mode);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- break;
- }
- case Token::MOD: {
- // For MOD we jump to the allocation_failure label, to call runtime.
- __ jmp(allocation_failure);
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
- heap_number_map);
- switch (op) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- __ Ret();
-
- // Logical shift right can produce an unsigned int32 that is not
- // an int32, and so is not in the smi range. Allocate a heap number
- // in that case.
- if (op == Token::SHR) {
- __ bind(&non_smi_shr_result);
- Label allocation_failed;
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
- TAG_OBJECT);
- // Set the map.
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ Ret();
-
- __ bind(&allocation_failed);
- // We need tagged values in rdx and rax for the following code,
- // not int32 in rax and rcx.
- __ Integer32ToSmi(rax, rcx);
- __ Integer32ToSmi(rdx, rbx);
- __ jmp(allocation_failure);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- // No fall-through from this generated code.
- if (FLAG_debug_code) {
- __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode);
- }
-}
-
-
-static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn(
- MacroAssembler* masm) {
- // Push arguments, but ensure they are under the return address
- // for a tail call.
- __ PopReturnAddressTo(rcx);
- __ push(rdx);
- __ push(rax);
- __ PushReturnAddressFrom(rcx);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &left_not_string, Label::kNear);
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
- __ j(not_equal, &right_arg_changed);
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
-
- if (call_runtime.is_linked()) {
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
- }
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- // The int32 case is identical to the Smi case. We avoid creating this
- // ic state on x64.
- UNREACHABLE();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rdx, rdx);
- } else {
- __ LoadRoot(rdx, Heap::kNanValueRootIndex);
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rax, rax);
- } else {
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
- Register input,
- Label* fail) {
- Label ok;
- __ JumpIfSmi(input, &ok, Label::kNear);
- Register heap_number_map = r8;
- Register scratch1 = r9;
- Register scratch2 = r10;
- // HeapNumbers containing 32bit integer values are also allowed.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, fail);
- __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
- // Convert, convert back, and compare the two doubles' bits.
- __ cvttsd2siq(scratch2, xmm0);
- __ cvtlsi2sd(xmm1, scratch2);
- __ movq(scratch1, xmm0);
- __ movq(scratch2, xmm1);
- __ cmpq(scratch1, scratch2);
- __ j(not_equal, fail);
- __ bind(&ok);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label gc_required, not_number;
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
- }
-
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &gc_required, &not_number, op_, mode_);
-
- __ bind(&not_number);
- GenerateTypeTransition(masm);
-
- __ bind(&gc_required);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in rdx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rdx, &skip_allocation);
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rdx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- // Use object in rdx as a result holder
- __ movq(rax, rdx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(rdx);
- __ push(rax);
-}
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
@@ -1145,7 +578,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Then load the bits of the double into rbx.
__ SmiToInteger32(rax, rax);
__ subq(rsp, Immediate(kDoubleSize));
- __ cvtlsi2sd(xmm1, rax);
+ __ Cvtlsi2sd(xmm1, rax);
__ movsd(Operand(rsp, 0), xmm1);
__ movq(rbx, xmm1);
__ movq(rdx, xmm1);
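
The capitalization change is meaningful: the raw cvtlsi2sd only writes the
low lane of its destination, leaving a false dependency on the register's
stale upper bits. The MacroAssembler wrapper is expected to break that
dependency first; a sketch of its likely shape (a paraphrase, not a quote):

    void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
      xorps(dst, dst);      // sever the dependency on dst's old contents
      cvtlsi2sd(dst, src);
    }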
@@ -1161,7 +594,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Input is a HeapNumber. Push it on the FPU stack and load its
// bits into rbx.
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rdx, rbx);
__ bind(&loaded);
@@ -1422,67 +855,6 @@ void TranscendentalCacheStub::GenerateOperation(
}
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-// Jump to conversion_failure: rdx and rax are unchanged.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(r8, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(r8, 0);
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in r8.
- __ TruncateHeapNumberToI(r8, rdx);
-
- // Here r8 has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(rcx, 0);
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the rax heap number in rcx.
- __ TruncateHeapNumberToI(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, r8);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
@@ -1503,89 +875,12 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
__ bind(&load_smi_rdx);
__ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ Cvtlsi2sd(xmm0, kScratchRegister);
__ JumpIfNotSmi(rax, &load_nonsmi_rax);
__ bind(&load_smi_rax);
__ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined) {
- Register heap_number_map = scratch3;
- Register smi_result = scratch1;
- Label done, maybe_undefined_first, maybe_undefined_second, first_done;
-
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label first_smi;
- __ JumpIfSmi(first, &first_smi, Label::kNear);
- __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_first
- : on_not_smis);
- // Convert HeapNumber to smi if possible.
- __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- // Check if conversion was successful by converting back and
- // comparing to the original double's bits.
- __ cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(first, smi_result);
-
- __ bind(&first_done);
- __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
- __ bind(&first_smi);
- __ AssertNotSmi(second);
- __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_second
- : on_not_smis);
- // Convert second to smi, if possible.
- __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- __ cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(second, smi_result);
- if (on_success != NULL) {
- __ jmp(on_success);
- } else {
- __ jmp(&done);
- }
-
- __ bind(&maybe_undefined_first);
- __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(first, first);
- __ jmp(&first_done);
-
- __ bind(&maybe_undefined_second);
- __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(second, second);
- if (on_success != NULL) {
- __ jmp(on_success);
- }
- // Else: fall through.
-
+ __ Cvtlsi2sd(xmm1, kScratchRegister);
__ bind(&done);
}
@@ -1603,7 +898,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Save 1 in double_result - we need this several times later on.
__ movq(scratch, Immediate(1));
- __ cvtlsi2sd(double_result, scratch);
+ __ Cvtlsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
@@ -1623,7 +918,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&base_is_smi);
__ SmiToInteger32(base, base);
- __ cvtlsi2sd(double_base, base);
+ __ Cvtlsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -1812,7 +1107,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// and may not have contained the exponent value in the first place when the
// input was a smi. We reset it with exponent value before bailing out.
__ j(not_equal, &done);
- __ cvtlsi2sd(double_exponent, exponent);
+ __ Cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
@@ -1902,8 +1197,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
receiver = rax;
}
- StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -1977,11 +1271,6 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
// Check that the key is a smi.
Label slow;
__ JumpIfNotSmi(rdx, &slow);
@@ -2003,10 +1292,10 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ SmiSub(rax, rax, rdx);
+ __ SmiToInteger32(rax, rax);
+ StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0));
__ Ret();
// Arguments adaptor case: Check index against actual arguments
@@ -2018,10 +1307,11 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
- index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
- __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ SmiSub(rcx, rcx, rdx);
+ __ SmiToInteger32(rcx, rcx);
+ StackArgumentsAccessor adaptor_args(rbx, rcx,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, adaptor_args.GetArgumentOperand(0));
__ Ret();
// Slow-case: Handle non-smi or out-of-bounds access to arguments
@@ -2395,11 +1685,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rsp[24] : subject string
// rsp[32] : JSRegExp object
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
+ enum RegExpExecStubArgumentIndices {
+ JS_REG_EXP_OBJECT_ARGUMENT_INDEX,
+ SUBJECT_STRING_ARGUMENT_INDEX,
+ PREVIOUS_INDEX_ARGUMENT_INDEX,
+ LAST_MATCH_INFO_ARGUMENT_INDEX,
+ REG_EXP_EXEC_ARGUMENT_COUNT
+ };
+ StackArgumentsAccessor args(rsp, REG_EXP_EXEC_ARGUMENT_COUNT,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
Label runtime;
// Ensure that a RegExp stack is allocated.
Isolate* isolate = masm->isolate();
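
The enum-plus-accessor form makes the stack layout self-describing: indices
name the arguments and GetArgumentOperand computes the rsp-relative Operand.
Mapping it back to the deleted constants (offsets from the comment above;
the accessor internals are inferred):

    // GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX)  -> old rsp[32]
    // GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX)     -> old rsp[24]
    // GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX)     -> old rsp[16]
    // GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX)    -> old rsp[8]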
@@ -2412,7 +1707,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
__ JumpIfSmi(rax, &runtime);
__ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
@@ -2445,7 +1740,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Reset offset for possibly sliced string.
__ Set(r14, 0);
- __ movq(rdi, Operand(rsp, kSubjectOffset));
+ __ movq(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
__ JumpIfSmi(rdi, &runtime);
__ movq(r15, rdi); // Make a copy of the original subject string.
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
@@ -2547,7 +1842,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// We have to use r15 instead of rdi to load the length because rdi might
// have been only made to look like a sequential string when it actually
// is an external string.
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+ __ movq(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
__ JumpIfNotSmi(rbx, &runtime);
__ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
__ j(above_equal, &runtime);
@@ -2649,7 +1944,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(r11);
- __ LeaveApiExitFrame();
+ __ LeaveApiExitFrame(true);
// Check the result.
Label success;
@@ -2667,11 +1962,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For failure return null.
__ LoadRoot(rax, Heap::kNullValueRootIndex);
- __ ret(4 * kPointerSize);
+ __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
// Load RegExp data.
__ bind(&success);
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
__ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
__ SmiToInteger32(rax,
FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
@@ -2680,7 +1975,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rdx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ movq(r15, Operand(rsp, kLastMatchInfoOffset));
+ __ movq(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
__ JumpIfSmi(r15, &runtime);
__ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
@@ -2704,7 +1999,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
kScratchRegister);
// Store last subject and last input.
- __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
__ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
__ movq(rcx, rax);
__ RecordWriteField(rbx,
@@ -2747,7 +2042,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Return last match info.
__ movq(rax, r15);
- __ ret(4 * kPointerSize);
+ __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
__ bind(&exception);
// Result must now be exception. If there is no pending exception already a
@@ -2910,112 +2205,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1));
- __ subq(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- Factory* factory = masm->isolate()->factory();
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- factory->heap_number_map(),
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
-
- __ bind(&is_smi);
- __ SmiToInteger32(scratch, object);
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- // Check if the entry is the smi we are looking for.
- __ cmpq(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ movq(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in string cache consists of two pointer sized fields,
- // but times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by addrmode on x64 platform.
- // So we have to premultiply entry index before lookup.
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rbx, args.GetArgumentOperand(0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
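Editor's note: the deleted stub above was folded into the macro assembler (the string-add path later in this diff now calls __ LookupNumberStringCache directly). The lookup the deleted comments describe is easy to model in plain C++: a smi hashes to its own value, a heap number hashes to the xor of its two 32-bit halves, and the masked hash is premultiplied by 16 because x64 addressing modes stop at a times_8 scale. A minimal standalone sketch of that index arithmetic, not V8's code:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // The cache is a FixedArray of (number, string) pairs, so the mask is
    // (length / 2) - 1 and an entry's byte offset is index * 2 pointers.
    uint32_t DoubleHash(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      // xor of the upper and lower 32-bit words, as in the deleted lookup.
      return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
    }

    size_t EntryByteOffset(uint32_t hash, uint32_t cache_length) {
      uint32_t mask = cache_length / 2 - 1;  // two slots per entry
      uint32_t index = hash & mask;
      // times_16 is not an x64 scale factor, so the stub premultiplied:
      // index << (kPointerSizeLog2 + 1), with kPointerSizeLog2 == 3.
      return static_cast<size_t>(index) << (3 + 1);
    }

    int main() {
      assert(EntryByteOffset(5, 128) == 5 * 16);  // entry 5 starts at byte 80
    }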
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -3322,6 +2511,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // rax : number of arguments to the construct function
// rbx : cache cell for call target
// rdi : the function to call
Isolate* isolate = masm->isolate();
@@ -3341,9 +2531,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in rcx.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ Cmp(FieldOperand(rcx, 0), allocation_site_map);
__ j(not_equal, &miss);
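Editor's note: the cache cell this hunk checks moves through three states; a hedged sketch of the transition logic GenerateRecordCallTarget implements (names illustrative, not V8's):

    enum class CallTargetState { kUninitialized, kMonomorphic, kMegamorphic };

    // Record one observed callee at a call site and return the new state.
    CallTargetState Record(CallTargetState state, const void* cached,
                           const void* callee) {
      switch (state) {
        case CallTargetState::kUninitialized:
          return CallTargetState::kMonomorphic;  // cache this callee
        case CallTargetState::kMonomorphic:
          // A second, different callee degrades the site to megamorphic.
          return cached == callee ? CallTargetState::kMonomorphic
                                  : CallTargetState::kMegamorphic;
        case CallTargetState::kMegamorphic:
          return CallTargetState::kMegamorphic;  // sticky sentinel
      }
      return state;
    }

The AllocationSite case in the hunk is the wrinkle: for Array calls the monomorphic payload is an AllocationSite rather than a JSFunction, hence the map check against allocation_site_map.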
@@ -3379,6 +2568,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments register must be smi-tagged to call out.
__ Integer32ToSmi(rax, rax);
__ push(rax);
__ push(rdi);
@@ -3562,6 +2752,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
}
@@ -3619,6 +2810,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
// stack is known to be aligned. This function takes one argument which is
// passed in register.
+ __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movq(arg_reg_1, rax);
__ movq(kScratchRegister,
ExternalReference::perform_gc_function(masm->isolate()));
@@ -4583,34 +3775,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
-
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, rcx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ CallRuntime(Runtime::kStringAdd, 2);
- }
- __ Ret();
- } else {
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, rcx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
- }
- __ Ret();
- } else {
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -4646,12 +3815,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
__ movq(arg, scratch1);
__ movq(Operand(rsp, stack_offset), arg);
__ bind(&done);
@@ -4935,13 +4099,18 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rsp[16] : from
// rsp[24] : string
- const int kToOffset = 1 * kPointerSize;
- const int kFromOffset = kToOffset + kPointerSize;
- const int kStringOffset = kFromOffset + kPointerSize;
- const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
+ enum SubStringStubArgumentIndices {
+ STRING_ARGUMENT_INDEX,
+ FROM_ARGUMENT_INDEX,
+ TO_ARGUMENT_INDEX,
+ SUB_STRING_ARGUMENT_COUNT
+ };
+
+ StackArgumentsAccessor args(rsp, SUB_STRING_ARGUMENT_COUNT,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
// Make sure first argument is a string.
- __ movq(rax, Operand(rsp, kStringOffset));
+ __ movq(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
STATIC_ASSERT(kSmiTag == 0);
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
@@ -4951,8 +4120,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: string
// rbx: instance type
// Calculate length of sub string using the smi values.
- __ movq(rcx, Operand(rsp, kToOffset));
- __ movq(rdx, Operand(rsp, kFromOffset));
+ __ movq(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
+ __ movq(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
@@ -4965,7 +4134,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Return original string.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
__ bind(&not_original_string);
Label single_char;
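Editor's note: the enum plus StackArgumentsAccessor replaces the hand-maintained kToOffset/kFromOffset/kStringOffset constants, and the matching __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize) pops all three arguments on return. Under the layout in the comment above (rsp[8]: to, rsp[16]: from, rsp[24]: string), the accessor arithmetic works out as in this standalone check, assuming 8-byte slots:

    #include <cassert>

    // Offset of argument `index` from rsp when no receiver is on the stack:
    // skip the return address, then count down from the deepest argument.
    int ArgOffsetFromRsp(int index, int count) {
      const int kPointerSize = 8, kPCOnStackSize = 8;
      return kPCOnStackSize + (count - 1 - index) * kPointerSize;
    }

    int main() {
      assert(ArgOffsetFromRsp(/*TO*/ 2, 3) == 8);       // rsp[8]  : to
      assert(ArgOffsetFromRsp(/*FROM*/ 1, 3) == 16);    // rsp[16] : from
      assert(ArgOffsetFromRsp(/*STRING*/ 0, 3) == 24);  // rsp[24] : string
    }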
@@ -5035,9 +4204,15 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_slice, Label::kNear);
+ // Make long jumps when allocation tracking is on, due to
+ // RecordObjectAllocation inside MacroAssembler::Allocate.
+ Label::Distance jump_distance =
+ masm->isolate()->heap_profiler()->is_tracking_allocations()
+ ? Label::kFar
+ : Label::kNear;
+ __ j(zero, &two_byte_slice, jump_distance);
__ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
- __ jmp(&set_slice_header, Label::kNear);
+ __ jmp(&set_slice_header, jump_distance);
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
__ bind(&set_slice_header);
@@ -5048,7 +4223,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
__ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(3 * kPointerSize);
__ bind(&copy_routine);
}
@@ -5102,7 +4277,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
__ movq(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
__ bind(&two_byte_sequential);
// Allocate the result.
@@ -5127,7 +4302,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
__ movq(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
// Just jump to runtime to create the sub string.
__ bind(&runtime);
@@ -5141,7 +4316,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
StringCharAtGenerator generator(
rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm);
- __ ret(kArgumentsSize);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
generator.SkipSlow(masm, &runtime);
}
@@ -5376,7 +4551,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
- __ cvtlsi2sd(xmm1, rcx);
+ __ Cvtlsi2sd(xmm1, rcx);
__ bind(&left);
__ JumpIfSmi(rdx, &left_smi, Label::kNear);
@@ -5386,7 +4561,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ jmp(&done);
__ bind(&left_smi);
__ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
- __ cvtlsi2sd(xmm0, rcx);
+ __ Cvtlsi2sd(xmm0, rcx);
__ bind(&done);
// Compare operands
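Editor's note: the capitalization change here and in later hunks swaps the raw cvtlsi2sd instruction for a macro-assembler wrapper. The usual motivation, and presumably the one here (an unverified assumption), is that cvtlsi2sd only writes the low lane of its XMM destination, so clearing the register first breaks a false dependency on its stale contents:

    // Hedged sketch of such a wrapper; signature and body assumed.
    void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
      xorps(dst, dst);       // zero dst so the convert has no input dependency
      cvtlsi2sd(dst, src);   // merge the converted double into the low lane
    }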
@@ -6392,9 +5567,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ incl(rdx);
__ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
if (FLAG_debug_code) {
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ Cmp(FieldOperand(rcx, 0), allocation_site_map);
__ Assert(equal, kExpectedAllocationSiteInCell);
}
@@ -6541,7 +5715,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ j(equal, &no_info);
__ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
__ Cmp(FieldOperand(rdx, 0),
- Handle<Map>(masm->isolate()->heap()->allocation_site_map()));
+ masm->isolate()->factory()->allocation_site_map());
__ j(not_equal, &no_info);
__ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 41678ecd20..c76abcf001 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -208,34 +208,6 @@ class StringCompareStub: public PlatformCodeStub {
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache, the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache, the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
- private:
- static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask);
-
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 24773c2595..390ec7c9c9 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -263,8 +263,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
- __ TestJSArrayForAllocationMemento(rdx, rdi);
- __ j(equal, allocation_memento_found);
+ __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, allocation_memento_found);
}
// Set transitioned map.
@@ -292,8 +291,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label allocated, new_backing_store, only_change_map, done;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(rdx, rdi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -386,7 +384,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// rbx: current element (smi-tagged)
__ JumpIfNotSmi(rbx, &convert_hole);
__ SmiToInteger32(rbx, rbx);
- __ cvtlsi2sd(xmm0, rbx);
+ __ Cvtlsi2sd(xmm0, rbx);
__ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
xmm0);
__ jmp(&entry);
@@ -418,8 +416,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label loop, entry, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(rdx, rdi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -469,7 +466,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(rax, r15, &gc_required);
// rax: new heap number
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+ __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), r14);
__ movq(FieldOperand(r11,
r9,
times_pointer_size,
@@ -678,8 +675,6 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-static const int kNoCodeAgeSequenceLength = 6;
-
static byte* GetNoCodeAgeSequence(uint32_t* length) {
static bool initialized = false;
static byte sequence[kNoCodeAgeSequenceLength];
@@ -711,7 +706,7 @@ bool Code::IsYoungSequence(byte* sequence) {
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
- *age = kNoAge;
+ *age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
sequence++; // Skip the kCallOpcode byte
@@ -723,30 +718,27 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
+ if (age == kNoAgeCodeAge) {
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
patcher.masm()->call(stub->instruction_start());
- for (int i = 0;
- i < kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength;
- i++) {
- patcher.masm()->nop();
- }
+ patcher.masm()->Nop(
+ kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
}
}
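Editor's note: byte accounting for the Nop() change above: the no-age prologue sequence is 6 bytes on x64 (the kNoCodeAgeSequenceLength deleted earlier in this file, now shared) and a short call is 5 (0xE8 plus a 32-bit displacement), so the old nop loop and the new Nop(...) both emit exactly one padding byte. A standalone model of the patch on a plain buffer, illustrative only:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const int kNoCodeAgeSequenceLength = 6;
      const int kShortCallInstructionLength = 5;  // 0xE8 + rel32
      uint8_t seq[kNoCodeAgeSequenceLength] = {};
      int32_t rel32 = 0x1234;  // displacement to the age stub (made up)
      seq[0] = 0xE8;
      std::memcpy(seq + 1, &rel32, sizeof(rel32));
      for (int i = kShortCallInstructionLength;
           i < kNoCodeAgeSequenceLength; ++i) {
        seq[i] = 0x90;  // single-byte nop padding
      }
      assert(seq[5] == 0x90);  // 6 - 5 == one nop
    }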
Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
ASSERT(index >= 0);
- ASSERT(base_reg_.is(rsp) || base_reg_.is(rbp));
int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
int displacement_to_last_argument = base_reg_.is(rsp) ?
kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
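Editor's note: this hunk drops the rsp/rbp assert; the displacement logic visible above reads: from rsp only a return address sits below the arguments, from rbp a saved frame pointer as well. The rest of the function is cut off by the hunk, so the full formula below is an assumption extrapolated from the callers in this diff:

    #include <cassert>

    // Model of GetArgumentOperand's displacement for an immediate argument
    // count; argument 0 sits farthest from the base register.
    int ArgumentDisplacement(bool base_is_rsp, int argument_count, int index,
                             bool contains_receiver) {
      const int kPointerSize = 8, kPCOnStackSize = 8, kFPOnStackSize = 8;
      int receiver = contains_receiver ? 1 : 0;
      int to_last_argument =
          base_is_rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
      return to_last_argument +
             (argument_count + receiver - 1 - index) * kPointerSize;
    }

    int main() {
      // Matches SubString above: string at rsp[24], to at rsp[8].
      assert(ArgumentDisplacement(true, 3, 0, false) == 24);
      assert(ArgumentDisplacement(true, 3, 2, false) == 8);
    }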
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 7d1f59ad5f..811ac507d5 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -156,7 +156,7 @@ class StackArgumentsAccessor BASE_EMBEDDED {
Operand GetArgumentOperand(int index);
Operand GetReceiverOperand() {
ASSERT(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER);
- return GetArgumentOperand(0);;
+ return GetArgumentOperand(0);
}
private:
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 303b756cac..bf11e0860f 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -82,87 +82,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x1d;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-// The back edge bookkeeping code matches the pattern:
-//
-// add <profiling_counter>, <-delta>
-// jns ok
-// call <stack guard>
-// ok:
-//
-// We will patch away the branch so the code is:
-//
-// add <profiling_counter>, <-delta> ;; Not changed
-// nop
-// nop
- // call <on-stack replacement>
-// ok:
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- // Turn the jump into nops.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- // Replace the call address.
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- // Restore the original jump.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- // Restore the original call address.
- Assembler::set_target_address_at(call_target_address,
- interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT_EQ(osr_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT_EQ(interrupt_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -187,10 +106,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
+ int params = descriptor->environment_length();
output_frame->SetRegister(rax.code(), params);
output_frame->SetRegister(rbx.code(), handler);
}
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 9984a46307..7735b552fe 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -93,7 +93,7 @@ static const ByteMnemonic two_operands_instr[] = {
{ 0x39, OPER_REG_OP_ORDER, "cmp" },
{ 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
{ 0x3B, REG_OPER_OP_ORDER, "cmp" },
- { 0x63, REG_OPER_OP_ORDER, "movsxlq" },
+ { 0x63, REG_OPER_OP_ORDER, "movsxl" },
{ 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
{ 0x85, REG_OPER_OP_ORDER, "test" },
{ 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
@@ -1036,14 +1036,14 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("extractps "); // reg/m32, xmm, imm8
current += PrintRightOperand(current);
- AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
} else if (third_byte == 0x0b) {
get_modrm(*current, &mod, &regop, &rm);
// roundsd xmm, xmm/m64, imm8
- AppendToBuffer("roundsd %s, ", NameOfCPURegister(regop));
- current += PrintRightOperand(current);
- AppendToBuffer(", %d", (*current) & 3);
+ AppendToBuffer("roundsd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", (*current) & 3);
current += 1;
} else {
UnimplementedInstruction();
@@ -1062,12 +1062,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} // else no immediate displacement.
AppendToBuffer("nop");
} else if (opcode == 0x28) {
- AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("movapd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x29) {
AppendToBuffer("movapd ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x6E) {
AppendToBuffer("mov%c %s,",
rex_w() ? 'q' : 'd',
@@ -1081,15 +1081,15 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("mov%c ",
rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x7F) {
AppendToBuffer("movdqa ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0xD6) {
AppendToBuffer("movq ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x50) {
AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
@@ -1214,7 +1214,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x7E) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movq %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("movq %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else {
UnimplementedInstruction();
@@ -1238,7 +1238,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// movaps xmm, xmm/m128
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("movaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x29) {
@@ -1247,7 +1247,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("movaps ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0xA2) {
// CPUID
@@ -1260,18 +1260,25 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte_size_operand_ = idesc.byte_size_operation;
current += PrintOperands(idesc.mnem, idesc.op_order_, current);
+ } else if (opcode == 0x54) {
+ // andps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("andps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
} else if (opcode == 0x57) {
// xorps xmm, xmm/m128
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("xorps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x50) {
// movmskps reg, xmm
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movmskps %s, ", NameOfCPURegister(regop));
+ AppendToBuffer("movmskps %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if ((opcode & 0xF0) == 0x80) {
@@ -1450,7 +1457,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case SHORT_IMMEDIATE_INSTR: {
byte* addr =
reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
- AppendToBuffer("%s rax, %s", idesc.mnem, NameOfAddress(addr));
+ AppendToBuffer("%s rax,%s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
}
@@ -1599,7 +1606,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (reg == 0) {
AppendToBuffer("nop"); // Common name for xchg rax,rax.
} else {
- AppendToBuffer("xchg%c rax, %s",
+ AppendToBuffer("xchg%c rax,%s",
operand_size_code(),
NameOfCPURegister(reg));
}
@@ -1628,12 +1635,12 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
bool is_32bit = (opcode >= 0xB8);
int reg = (opcode & 0x7) | (rex_b() ? 8 : 0);
if (is_32bit) {
- AppendToBuffer("mov%c %s, ",
+ AppendToBuffer("mov%c %s,",
operand_size_code(),
NameOfCPURegister(reg));
data += PrintImmediate(data, OPERAND_DOUBLEWORD_SIZE);
} else {
- AppendToBuffer("movb %s, ",
+ AppendToBuffer("movb %s,",
NameOfByteCPURegister(reg));
data += PrintImmediate(data, OPERAND_BYTE_SIZE);
}
@@ -1755,7 +1762,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0x3C:
- AppendToBuffer("cmp al, 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ AppendToBuffer("cmp al,0x%x", *reinterpret_cast<int8_t*>(data + 1));
data += 2;
break;
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 2af5a81bb5..fb17964ada 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -70,11 +70,11 @@ class ExitFrameConstants : public AllStatic {
static const int kSPOffset = -1 * kPointerSize;
static const int kCallerFPOffset = +0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
+ static const int kCallerPCOffset = kFPOnStackSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
+ static const int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize;
};
@@ -82,7 +82,7 @@ class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
+ static const int kLastParameterOffset = kFPOnStackSize + kPCOnStackSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
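Editor's note: on x64 both a saved frame pointer and a return address occupy one 8-byte slot, so the rewritten constants keep their old numeric values; the gain is that the FP and PC slots are named instead of being baked-in "+1"/"+2 word" magic. A quick standalone check of that equivalence:

    #include <cassert>

    int main() {
      const int kPointerSize = 8, kFPOnStackSize = 8, kPCOnStackSize = 8;
      const int kCallerPCOffset = kFPOnStackSize;              // was +1 word
      const int kCallerSPDisplacement =
          kCallerPCOffset + kPCOnStackSize;                    // was +2 words
      const int kLastParameterOffset =
          kFPOnStackSize + kPCOnStackSize;                     // was +2 words
      assert(kCallerPCOffset == 1 * kPointerSize);
      assert(kCallerSPDisplacement == 2 * kPointerSize);
      assert(kLastParameterOffset == 2 * kPointerSize);
    }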
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index c24512ecae..02ba67b90e 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -140,10 +140,9 @@ void FullCodeGenerator::Generate() {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
+ StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ movq(args.GetReceiverOperand(), kScratchRegister);
__ bind(&ok);
}
@@ -153,10 +152,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
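Editor's note: __ Prologue(BUILD_FUNCTION_FRAME) folds the four deleted instructions into one macro; the stub-frame variant shows up in the lithium-codegen hunk later in this diff. A sketch of what the macro covers, reconstructed from the lines it replaces in both places (signature assumed):

    void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
      push(rbp);                 // caller's frame pointer
      movq(rbp, rsp);
      push(rsi);                 // callee's context
      if (frame_mode == BUILD_STUB_FRAME) {
        Push(Smi::FromInt(StackFrame::STUB));  // marker instead of a function
      } else {
        push(rdi);               // callee's JS function
      }
    }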
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -678,7 +674,8 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) {
int offset = -var->index() * kPointerSize;
// Adjust by a (parameter or local) base offset.
if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ offset += kFPOnStackSize + kPCOnStackSize +
+ (info_->scope()->num_parameters() - 1) * kPointerSize;
} else {
offset += JavaScriptFrameConstants::kLocal0Offset;
}
@@ -1129,7 +1126,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(rbx, cell);
+ __ Move(rbx, cell);
__ Move(FieldOperand(rbx, Cell::kValueOffset),
Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
@@ -1609,21 +1606,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -2638,7 +2629,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
// Push the receiver of the enclosing function and do runtime call.
- __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
+ StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
+ __ push(args.GetReceiverOperand());
// Push the language mode.
__ Push(Smi::FromInt(language_mode()));
@@ -3513,8 +3505,8 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into rax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -4883,6 +4875,79 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x1d;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ Address jns_offset_address = call_target_address - 2;
+
+ switch (target_state) {
+ case INTERRUPT:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // jns ok
+ // call <interrupt stub>
+ // ok:
+ *jns_instr_address = kJnsInstruction;
+ *jns_offset_address = kJnsOffset;
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // nop
+ // nop
+ // call <on-stack replacement>
+ // ok:
+ *jns_instr_address = kNopByteOne;
+ *jns_offset_address = kNopByteTwo;
+ break;
+ }
+
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+
+ if (*jns_instr_address == kJnsInstruction) {
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return INTERRUPT;
+ }
+
+ ASSERT_EQ(kNopByteOne, *jns_instr_address);
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+ if (Assembler::target_address_at(call_target_address) ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
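Editor's note: PatchAt only ever touches two bytes at fixed offsets behind the call's 32-bit operand, flipping between the jns encoding (0x79 0x1d) and a two-byte nop (0x66 0x90) before retargeting the call. A standalone model of that flip over a plain buffer:

    #include <cassert>
    #include <cstdint>

    // pc points just past the call's 32-bit operand, as in PatchAt above.
    void Patch(uint8_t* pc, bool to_osr) {
      const int kIntSize = 4;
      uint8_t* call_target_address = pc - kIntSize;
      *(call_target_address - 3) = to_osr ? 0x66 : 0x79;  // nop : jns
      *(call_target_address - 2) = to_osr ? 0x90 : 0x1d;  // nop : jns offset
    }

    int main() {
      uint8_t code[16] = {};
      uint8_t* pc = code + 12;
      Patch(pc, /*to_osr=*/true);
      assert(code[5] == 0x66 && code[6] == 0x90);
      Patch(pc, /*to_osr=*/false);  // revert to the interrupt check
      assert(code[5] == 0x79 && code[6] == 0x1d);
    }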
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 4a7c68a53c..15f410c134 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -1330,7 +1330,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
@@ -1451,7 +1451,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, strict_mode,
+ Code::HANDLER, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rdx, rcx, rbx, no_reg);
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 9dca6b3e20..7c70094fbf 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -89,9 +89,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
+ RegisterDependentCodeForEmbeddedMaps(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -103,24 +101,6 @@ void LChunkBuilder::Abort(BailoutReason reason) {
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- int length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
const int kPageSize = 4 * KB;
@@ -152,10 +132,9 @@ bool LCodeGen::GeneratePrologue() {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ StackArgumentsAccessor args(rsp, scope()->num_parameters());
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ movq(args.GetReceiverOperand(), kScratchRegister);
__ bind(&ok);
}
}
@@ -164,14 +143,7 @@ bool LCodeGen::GeneratePrologue() {
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- if (info()->IsStub()) {
- __ Push(Smi::FromInt(StackFrame::STUB));
- } else {
- __ push(rdi); // Callee's JS function.
- }
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
info()->AddNoFrameRange(0, masm_->pc_offset());
}
@@ -273,36 +245,6 @@ void LCodeGen::GenerateOsrPrologue() {
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
-
- // Don't emit code for basic blocks with a replacement.
- if (instr->IsLabel()) {
- emit_instructions = !LLabel::cast(instr)->HasReplacement();
- }
- if (!emit_instructions) continue;
-
- if (FLAG_code_comments && instr->HasInterestingComment(this)) {
- Comment(";;; <@%d,#%d> %s",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->Mnemonic());
- }
-
- RecordAndUpdatePosition(instr->position());
-
- instr->CompileToNative(this);
- }
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- return !is_aborted();
-}
-
-
bool LCodeGen::GenerateJumpTable() {
Label needs_frame;
if (jump_table_.length() > 0) {
@@ -350,8 +292,9 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -614,8 +557,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
int argc) {
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
@@ -637,13 +578,13 @@ void LCodeGen::CallCode(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ CallRuntime(function, num_arguments);
+ __ CallRuntime(function, num_arguments, save_doubles);
+
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}
@@ -754,26 +695,31 @@ void LCodeGen::DeoptimizeIf(Condition cc,
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
@@ -884,7 +830,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -896,17 +842,10 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
+ masm()->positions_recorder()->WriteRecordedPositions();
}
@@ -973,11 +912,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1615,8 +1549,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ LoadObject(ToRegister(instr->result()), value);
+ __ Move(ToRegister(instr->result()), value);
}
@@ -1832,7 +1765,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
@@ -1878,15 +1811,17 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
// when there is a mulsd depending on the result
__ movaps(left, left);
break;
- case Token::MOD:
+ case Token::MOD: {
+ XMMRegister xmm_scratch = double_scratch0();
__ PrepareCallCFunction(2);
- __ movaps(xmm0, left);
+ __ movaps(xmm_scratch, left);
ASSERT(right.is(xmm1));
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movaps(result, xmm0);
+ __ movaps(result, xmm_scratch);
break;
+ }
default:
UNREACHABLE();
break;
@@ -1905,14 +1840,6 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
}
-int LCodeGen::GetNextEmittedBlock() const {
- for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
- if (!chunk_->GetLabel(i)->HasReplacement()) return i;
- }
- return -1;
-}
-
-
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
int left_block = instr->TrueDestination(chunk_);
@@ -1947,25 +1874,6 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, no_condition);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, no_condition);
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, equal);
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
@@ -1981,8 +1889,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
@@ -2001,8 +1910,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, no_condition);
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
@@ -2083,8 +1993,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, instr->FalseLabel(chunk_));
__ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
@@ -2119,6 +2030,10 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = equal;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
case Token::LT:
cond = is_unsigned ? below : less;
break;
@@ -2206,7 +2121,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
if (instr->right()->IsConstantOperand()) {
Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
- __ CmpObject(left, right);
+ __ Cmp(left, right);
} else {
Register right = ToRegister(instr->right());
__ cmpq(left, right);
@@ -2574,7 +2489,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub stub(flags);
__ push(ToRegister(instr->value()));
- __ PushHeapObject(instr->function());
+ __ Push(instr->function());
static const int kAdditionalDelta = 10;
int delta =
@@ -2610,14 +2525,6 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(result, FieldOperand(result, Map::kInstanceSizeOffset));
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
@@ -2682,7 +2589,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ LoadGlobalCell(result, instr->hydrogen()->cell());
+ __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
@@ -2704,7 +2611,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<Cell> cell_handle = instr->hydrogen()->cell();
+ Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -2805,7 +2712,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
__ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
} else {
Register object = ToRegister(instr->object());
- __ movq(result, MemOperand(object, offset));
+ __ Load(result, MemOperand(object, offset), access.representation());
}
return;
}
@@ -2819,12 +2726,11 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (access.IsInobject()) {
- __ movq(result, FieldOperand(object, offset));
- } else {
+ if (!access.IsInobject()) {
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, offset));
+ object = result;
}
+ __ Load(result, FieldOperand(object, offset), access.representation());
}
@@ -2879,6 +2785,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -2896,8 +2808,9 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
instr->index()->IsConstantOperand()) {
int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- int index = (const_length - const_index) + 1;
- __ movq(result, Operand(arguments, index * kPointerSize));
+ StackArgumentsAccessor args(arguments, const_length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(result, args.GetArgumentOperand(const_index));
} else {
Register length = ToRegister(instr->length());
// There are two words between the frame pointer and the last argument.
@@ -2907,8 +2820,9 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
} else {
__ subl(length, ToOperand(instr->index()));
}
- __ movq(result,
- Operand(arguments, length, times_pointer_size, kPointerSize));
+ StackArgumentsAccessor args(arguments, length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(result, args.GetArgumentOperand(0));
}
}
@@ -3112,7 +3026,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(rsp, -2 * kPointerSize));
+ __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
} else {
// Check for arguments adapter frame.
Label done, adapted;
@@ -3234,7 +3148,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ testl(length, length);
__ j(zero, &invoke, Label::kNear);
__ bind(&loop);
- __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+ StackArgumentsAccessor args(elements, length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ push(args.GetArgumentOperand(0));
__ decl(length);
__ j(not_zero, &loop);
@@ -3242,7 +3158,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(rax);
@@ -3285,7 +3200,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(rsi); // The context is the first argument.
- __ PushHeapObject(instr->hydrogen()->pairs());
+ __ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
@@ -3316,11 +3231,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
dont_adapt_arguments || formal_parameter_count == arity;
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
if (can_invoke_directly) {
if (rdi_state == RDI_UNINITIALIZED) {
- __ LoadHeapObject(rdi, function);
+ __ Move(rdi, function);
}
// Change context.
@@ -3401,10 +3315,10 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
__ bind(&allocated);
- __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ MoveDouble(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ shl(tmp2, Immediate(1));
__ shr(tmp2, Immediate(1));
- __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+ __ MoveDouble(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
@@ -3451,11 +3365,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
- __ andpd(input_reg, scratch);
+ __ andps(input_reg, scratch);
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else if (r.IsSmi()) {
@@ -3473,7 +3387,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathFloor(LMathFloor* instr) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3520,7 +3434,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, input_reg);
- __ cvtlsi2sd(xmm_scratch, output_reg);
+ __ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
@@ -3532,7 +3446,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
void LCodeGen::DoMathRound(LMathRound* instr) {
- const XMMRegister xmm_scratch = xmm0;
+ const XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
@@ -3569,7 +3483,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ cvtlsi2sd(xmm_scratch, output_reg);
+ __ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &restore, Label::kNear);
__ subl(output_reg, Immediate(1));
@@ -3600,7 +3514,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -3717,8 +3631,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
XMMRegister result = ToDoubleRegister(instr->result());
- // We use xmm0 as fixed scratch register here.
- XMMRegister scratch4 = xmm0;
+ XMMRegister scratch4 = double_scratch0();
__ movq(scratch3, V8_INT64_C(0x4130000000000000),
RelocInfo::NONE64); // 1.0 x 2^20 as double
__ movq(scratch4, scratch3);
@@ -3731,18 +3644,44 @@ void LCodeGen::DoRandom(LRandom* instr) {
void LCodeGen::DoMathExp(LMathExp* instr) {
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
void LCodeGen::DoMathLog(LMathLog* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ ASSERT(instr->value()->Equals(instr->result()));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
+ Label positive, done, zero;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
+ __ j(above, &positive, Label::kNear);
+ __ j(equal, &zero, Label::kNear);
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ Operand nan_operand = masm()->ExternalOperand(nan);
+ __ movsd(input_reg, nan_operand);
+ __ jmp(&done, Label::kNear);
+ __ bind(&zero);
+ ExternalReference ninf =
+ ExternalReference::address_of_negative_infinity();
+ Operand ninf_operand = masm()->ExternalOperand(ninf);
+ __ movsd(input_reg, ninf_operand);
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive);
+ __ fldln2();
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), input_reg);
+ __ fld_d(Operand(rsp, 0));
+ __ fyl2x();
+ __ fstp_d(Operand(rsp, 0));
+ __ movsd(input_reg, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ bind(&done);
}
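The new inline sequence drops the transcendental cache stub in favor of the x87 pair fldln2/fyl2x: fyl2x computes st(1) * log2(st(0)), and seeding st(1) with ln(2) yields ln(2) * log2(x) == ln(x). A sketch of the identity and the two special-cased inputs, for illustration only:

#include <cmath>
#include <limits>

// Mirrors the branch structure above: zero maps to -Infinity, negative
// inputs (and NaN) map to NaN, and the positive path is the fyl2x identity.
double MathLogSketch(double x) {
  if (x == 0.0) return -std::numeric_limits<double>::infinity();
  if (x < 0.0 || std::isnan(x)) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  return std::log(2.0) * std::log2(x);  // == std::log(x)
}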
@@ -3777,7 +3716,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
@@ -3910,7 +3848,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
+ CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
@@ -3940,11 +3878,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
if (instr->object()->IsConstantOperand()) {
ASSERT(value.is(rax));
+ ASSERT(!access.representation().IsSpecialization());
LConstantOperand* object = LConstantOperand::cast(instr->object());
__ store_rax(ToExternalReference(object));
} else {
Register object = ToRegister(instr->object());
- __ movq(MemOperand(object, offset), value);
+ __ Store(MemOperand(object, offset), value, representation);
}
return;
}
@@ -4013,15 +3952,16 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (operand_value->IsRegister()) {
- __ movq(FieldOperand(write_register, offset),
- ToRegister(operand_value));
+ Register value = ToRegister(operand_value);
+ __ Store(FieldOperand(write_register, offset), value, representation);
} else {
Handle<Object> handle_value = ToHandle(operand_value);
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
__ Move(FieldOperand(write_register, offset), handle_value);
}
} else {
- __ movq(FieldOperand(write_register, offset), ToRegister(instr->value()));
+ Register value = ToRegister(instr->value());
+ __ Store(FieldOperand(write_register, offset), value, representation);
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -4325,8 +4265,10 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationMemento(object, temp);
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(equal, instr->environment());
+ __ bind(&no_memento_found);
}
@@ -4449,9 +4391,9 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
if (input->IsRegister()) {
- __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
+ __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
} else {
- __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+ __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
}
}
@@ -4479,6 +4421,22 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ LOperand* output = instr->result();
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange() ||
+ instr->hydrogen()->value()->range()->upper() == kMaxInt) {
+ // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
+ // interval, so we treat kMaxInt as a sentinel for this entire interval.
+ __ testl(ToRegister(input), Immediate(0x80000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Integer32ToSmi(ToRegister(output), ToRegister(input));
+}
+
+
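Because x64 smis carry a full 32-bit payload, the only uint32 values that cannot be tagged are those at or above 2^31; the testl against 0x80000000 is exactly that guard. A sketch of the predicate (hypothetical helper, tagging elided):

#include <cstdint>

// Returns false where the generated code deoptimizes: a value in
// (INT32_MAX, UINT32_MAX] has its sign bit set when viewed as int32 and
// does not fit in a smi payload.
bool Uint32FitsInSmiSketch(uint32_t value) {
  return (value & UINT32_C(0x80000000)) == 0;
}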
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
@@ -4517,15 +4475,17 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
Label slow;
Register reg = ToRegister(instr->value());
Register tmp = reg.is(rax) ? rcx : rax;
+ XMMRegister temp_xmm = ToDoubleRegister(instr->temp());
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
Label done;
- // Load value into xmm1 which will be preserved across potential call to
+ // Load value into temp_xmm which will be preserved across potential call to
// runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
// XMM registers on x64).
- __ LoadUint32(xmm1, reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ LoadUint32(temp_xmm, reg, xmm_scratch);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, &slow);
@@ -4543,10 +4503,10 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
if (!reg.is(rax)) __ movq(reg, rax);
- // Done. Put the value in xmm1 into the value of the allocated heap
+ // Done. Put the value in temp_xmm into the value of the allocated heap
// number.
__ bind(&done);
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm1);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
__ StoreToSafepointRegisterSlot(reg, reg);
}
@@ -4623,7 +4583,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
- Label load_smi, done;
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
@@ -4632,27 +4592,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number, convert;
- __ j(equal, &heap_number, Label::kNear);
- // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
-
- __ bind(&convert);
- __ xorps(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
+ // On x64 it is safe to load at heap number offset before evaluating the map
+ // check, since all heap objects are at least two words long.
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ bind(&heap_number);
+ if (can_convert_undefined_to_nan) {
+ __ j(not_equal, &convert);
+ } else {
+ DeoptimizeIf(not_equal, env);
}
- // Heap number to XMM conversion.
- __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, result_reg);
__ j(not_equal, &done, Label::kNear);
@@ -4661,6 +4613,18 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
+
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+
+ // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, env);
+
+ __ xorps(result_reg, result_reg);
+ __ divsd(result_reg, result_reg);
+ __ jmp(&done, Label::kNear);
+ }
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
@@ -4668,30 +4632,44 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Smi to XMM conversion
__ bind(&load_smi);
__ SmiToInteger32(kScratchRegister, input_reg);
- __ cvtlsi2sd(result_reg, kScratchRegister);
+ __ Cvtlsi2sd(result_reg, kScratchRegister);
__ bind(&done);
}
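The restructuring of EmitNumberUntagD is a speculative-load pattern: the double payload is read before the map check because the load can never fault (every heap object is at least two words), and the loaded value is simply ignored on the convert/deopt path. A rough sketch under that invariant, with illustrative types rather than V8's:

#include <cstdint>
#include <cstring>

struct ObjectSketch {
  const void* map;   // word 0: every object has at least this...
  uint64_t payload;  // word 1: ...and one more word, so the load is safe
};

double SpeculativeUntagSketch(const ObjectSketch* obj,
                              const void* heap_number_map,
                              bool* is_heap_number) {
  double value;
  std::memcpy(&value, &obj->payload, sizeof value);  // load before the check
  *is_heap_number = (obj->map == heap_number_map);
  return value;  // meaningful only when *is_heap_number is true
}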
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
- Label heap_number;
Register input_reg = ToRegister(instr->value());
-
if (instr->truncating()) {
+ Label no_heap_number, check_bools, check_false;
+
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ __ j(not_equal, &no_heap_number, Label::kNear);
+ __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ jmp(done);
+
+ __ bind(&no_heap_number);
+ // Check for Oddballs. Undefined/False is converted to zero and True to one
+ // for truncating conversions.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
+ __ j(not_equal, &check_bools, Label::kNear);
__ Set(input_reg, 0);
__ jmp(done);
- __ bind(&heap_number);
- __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ bind(&check_bools);
+ __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &check_false, Label::kNear);
+ __ Set(input_reg, 1);
+ __ jmp(done);
+
+ __ bind(&check_false);
+ __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
+ __ RecordComment("Deferred TaggedToI: cannot truncate");
+ DeoptimizeIf(not_equal, instr->environment());
+ __ Set(input_reg, 0);
+ __ jmp(done);
} else {
Label bailout;
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
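For truncating conversions the deferred path now accepts oddballs directly instead of deopting on anything but undefined, matching ToInt32 semantics on undefined, true and false. A sketch of the mapping with a hypothetical enum; any other non-heap-number input still deopts:

#include <cstdint>

enum class OddballSketch { kUndefined, kTrue, kFalse };

int32_t TruncateOddballSketch(OddballSketch v) {
  switch (v) {
    case OddballSketch::kUndefined: return 0;  // ToInt32(undefined) == 0
    case OddballSketch::kTrue:      return 1;  // ToInt32(true) == 1
    case OddballSketch::kFalse:     return 0;  // ToInt32(false) == 0
  }
  return 0;  // unreachable; other inputs hit the deopt path above
}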
@@ -4721,12 +4699,16 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
-
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- __ bind(deferred->exit());
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiToInteger32(input_reg, input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiToInteger32(input_reg, input_reg);
+ __ bind(deferred->exit());
+ }
}
@@ -4764,7 +4746,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ TruncateDoubleToI(result_reg, input_reg);
} else {
Label bailout, done;
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
@@ -4785,7 +4768,8 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Register result_reg = ToRegister(result);
Label bailout, done;
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
@@ -4862,8 +4846,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object();
- __ CmpHeapObject(reg, object);
+ __ Cmp(reg, instr->hydrogen()->object().handle());
DeoptimizeIf(not_equal, instr->environment());
}
@@ -4903,22 +4886,21 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
-
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->has_migration_target()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
__ CompareMap(reg, map, &success);
__ j(equal, &success);
}
- Handle<Map> map = map_set->last();
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
__ CompareMap(reg, map, &success);
if (instr->hydrogen()->has_migration_target()) {
__ j(not_equal, deferred->entry());
@@ -4932,8 +4914,9 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
@@ -4948,6 +4931,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -4966,8 +4950,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
+ __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
@@ -5089,7 +5073,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// rax = regexp literal clone.
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(rcx, instr->hydrogen()->literals());
+ __ Move(rcx, instr->hydrogen()->literals());
__ movq(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
@@ -5160,13 +5144,7 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
- Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- AllowDeferredHandleDereference smi_check;
- if (object->IsSmi()) {
- __ Push(Handle<Smi>::cast(object));
- } else {
- __ PushHeapObject(Handle<HeapObject>::cast(object));
- }
+ __ Push(ToHandle(LConstantOperand::cast(operand)));
} else if (operand->IsRegister()) {
__ push(ToRegister(operand));
} else {
@@ -5280,7 +5258,7 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
__ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker, Label::kNear);
- __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+ __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index f994645019..f3f202a277 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -32,6 +32,7 @@
#include "checks.h"
#include "deoptimizer.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -44,42 +45,25 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen V8_FINAL BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -146,18 +130,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
@@ -166,7 +138,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk()->graph(); }
- int GetNextEmittedBlock() const;
+ XMMRegister double_scratch0() const { return xmm0; }
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -178,14 +150,12 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
- void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
@@ -211,7 +181,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -284,8 +255,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -340,7 +310,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int* offset,
AllocationSiteMode mode);
- void EnsureSpaceForLazyDeopt(int space_needed);
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -355,24 +325,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void MakeSureStackPagesMapped(int offset);
#endif
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -384,8 +344,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index 71db17c931..8d1c2a2835 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -200,7 +200,7 @@ void LGapResolver::EmitMove(int index) {
} else if (cgen_->IsInteger32Constant(constant_source)) {
__ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ __ Move(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
double v = cgen_->ToDouble(constant_source);
@@ -222,7 +222,7 @@ void LGapResolver::EmitMove(int index) {
// value.
__ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
- __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
+ __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
__ movq(dst, kScratchRegister);
}
}
@@ -262,7 +262,7 @@ void LGapResolver::EmitSwap(int index) {
// Swap two general-purpose registers.
Register src = cgen_->ToRegister(source);
Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
+ __ xchgq(dst, src);
} else if ((source->IsRegister() && destination->IsStackSlot()) ||
(source->IsStackSlot() && destination->IsRegister())) {
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index d9daaacca0..6262e7ede3 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -353,19 +353,20 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
return spill_slot_count_++;
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
// All stack slots are Double stack slots on x64.
// Alternatively, at some point, start using half-size
// stack slots for int32 values.
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
@@ -445,7 +446,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(false);
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
@@ -664,7 +665,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -719,46 +720,39 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ } else {
+ right = UseFixed(right_value, rcx);
+ }
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ bool does_deopt = false;
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
} else {
- right = UseFixed(right_value, rcx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
- }
+ return DoArithmeticT(op, instr);
}
-
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
}
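As the comment in the new code notes, the only deopt case is a logical shift right by zero: the int32 operand is reinterpreted as uint32 while the bits stay put, so a result above INT32_MAX is unrepresentable unless every use truncates back to int32. A sketch of the predicate (illustrative; the truncation flag stands in for the HValue queries):

#include <cstdint>

bool ShrZeroNeedsDeoptSketch(int32_t value, bool uses_truncate_to_int32) {
  uint32_t logical = static_cast<uint32_t>(value);  // what x >>> 0 produces
  return !uses_truncate_to_int32 && logical > UINT32_C(0x7FFFFFFF);
}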
@@ -767,21 +761,22 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
}
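Folding Token::MOD into DoArithmeticD keeps all double arithmetic in one builder: MOD is the one case routed through a C call (hence MarkAsCall and the fixed xmm1 input). A sketch of the operation the call performs, assuming the runtime helper matches C's fmod, which shares JS's sign-of-dividend rule:

#include <cmath>

double DoubleModSketch(double left, double right) {
  return std::fmod(left, right);  // sign follows `left`, like JS %
}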
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
@@ -864,10 +859,31 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ HValue* first_operand = current->OperandCount() == 0
+ ? graph()->GetConstant1()
+ : current->OperandAt(0);
+ instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(first_operand)));
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
#if DEBUG
// Make sure that the lithium instruction has either no fixed register
// constraints in temps or the result OR no uses that are only used at
@@ -897,14 +913,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -996,7 +1010,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -1006,16 +1020,10 @@ LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ HValue* value = instr->value();
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
// deoptimization environment. If the instruction is generic no
@@ -1067,12 +1075,6 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegister(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -1095,7 +1097,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = UseOrConstant(instr->argument());
return new(zone()) LPushArgument(argument);
}
@@ -1161,14 +1162,12 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* function = UseFixed(instr->function(), rdi);
- argument_count_ -= instr->argument_count();
LInvokeFunction* result = new(zone()) LInvokeFunction(function);
return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1215,9 +1214,11 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
LMathLog* result = new(zone()) LMathLog(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ return DefineSameAsFirst(result);
}
@@ -1270,33 +1271,28 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
LOperand* key = UseFixed(instr->key(), rcx);
- argument_count_ -= instr->argument_count();
LCallKeyed* result = new(zone()) LCallKeyed(key);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallNamed, rax), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, rax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* constructor = UseFixed(instr->constructor(), rdi);
- argument_count_ -= instr->argument_count();
LCallNew* result = new(zone()) LCallNew(constructor);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1304,7 +1300,6 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* constructor = UseFixed(instr->constructor(), rdi);
- argument_count_ -= instr->argument_count();
LCallNewArray* result = new(zone()) LCallNewArray(constructor);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1312,14 +1307,12 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* function = UseFixed(instr->function(), rdi);
- argument_count_ -= instr->argument_count();
LCallFunction* result = new(zone()) LCallFunction(function);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, rax), instr);
}
@@ -1348,27 +1341,19 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
@@ -1385,8 +1370,9 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LOperand* divisor = UseRegister(instr->right());
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, rax));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1485,17 +1471,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need to
- // use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, xmm2),
- UseFixedDouble(right, xmm1));
- return MarkAsCall(DefineFixedDouble(mod, xmm1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
@@ -1515,7 +1494,6 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
@@ -1536,7 +1514,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1568,7 +1545,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
return NULL;
@@ -1662,6 +1638,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1670,8 +1648,8 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1803,6 +1781,13 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), rax);
return MarkAsCall(new(zone()) LThrow(value), instr);
@@ -1837,7 +1822,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
// building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1899,10 +1883,18 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result =
- DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = NULL;
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ result = DefineAsRegister(new(zone()) LUint32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange() &&
+ val->range()->upper() != kMaxInt) {
+ return result;
+ }
+ } else {
+ result = DefineAsRegister(new(zone()) LInteger32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return result;
+ }
}
return AssignEnvironment(result);
} else {
@@ -1934,12 +1926,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone()) LIsNumberAndBranch(
- UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
@@ -2075,7 +2061,14 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- if (instr->access().IsExternalMemory() && instr->access().offset() == 0) {
+ // Use the special mov rax, moffs64 encoding for external
+ // memory accesses with 64-bit word-sized values.
+ if (instr->access().IsExternalMemory() &&
+ instr->access().offset() == 0 &&
+ (instr->access().representation().IsSmi() ||
+ instr->access().representation().IsTagged() ||
+ instr->access().representation().IsHeapObject() ||
+ instr->access().representation().IsExternal())) {
LOperand* obj = UseRegisterOrConstantAtStart(instr->object());
return DefineFixed(new(zone()) LLoadNamedField(obj), rax);
}
@@ -2098,6 +2091,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2391,7 +2389,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallStub, rax), instr);
}
@@ -2513,7 +2510,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index b3d08c8a4c..06cb171923 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -105,7 +105,6 @@ class LCodeGen;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
@@ -114,12 +113,12 @@ class LCodeGen;
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
- V(IsNumberAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -182,6 +181,7 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
V(WrapReceiver)
@@ -213,7 +213,6 @@ class LInstruction : public ZoneObject {
: environment_(NULL),
hydrogen_value_(NULL),
bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() {}
@@ -254,15 +253,6 @@ class LInstruction : public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- // The 31 bits PositionBits is used to store the int position value. And the
- // position value may be RelocInfo::kNoPosition (-1). The accessor always
- // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
- // and can fit into the 31 bits PositionBits.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -272,7 +262,7 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
@@ -302,7 +292,6 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -403,17 +392,17 @@ class LInstructionGap V8_FINAL : public LGap {
class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual bool IsControl() const V8_OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
private:
- int block_id_;
+ HBasicBlock* block_;
};
@@ -883,19 +872,6 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
- public:
- explicit LIsNumberAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
-};
-
-
class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1091,19 +1067,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1259,7 +1222,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1310,8 +1273,8 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* date() { return inputs_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
@@ -1535,6 +1498,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1923,8 +1895,13 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
@@ -1967,6 +1944,19 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -2049,7 +2039,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2205,8 +2195,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2544,8 +2536,8 @@ class LPlatformChunk V8_FINAL : public LChunk {
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
};
@@ -2562,13 +2554,14 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
next_block_(NULL),
argument_count_(0),
allocator_(allocator),
- position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2701,7 +2694,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
@@ -2713,7 +2706,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HBasicBlock* next_block_;
int argument_count_;
LAllocator* allocator_;
- int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 69abc5454f..a18ff0d274 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -37,6 +37,7 @@
#include "serialize.h"
#include "debug.h"
#include "heap.h"
+#include "isolate-inl.h"
namespace v8 {
namespace internal {
@@ -605,22 +606,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(rax, function->nargs);
- LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1, kSaveFPRegs);
- CallStub(&ces);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments matches the
// expectation.
@@ -635,7 +623,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(rax, num_arguments);
LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
+ CEntryStub ces(f->result_size, save_doubles);
CallStub(&ces);
}
@@ -691,13 +679,16 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
- Register thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Register thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
Label prologue;
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label write_back;
@@ -750,7 +741,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&profiler_disabled);
// Call the api function!
- movq(rax, reinterpret_cast<int64_t>(function_address),
+ movq(rax, reinterpret_cast<Address>(function_address),
RelocInfo::EXTERNAL_REFERENCE);
bind(&end_profiler_check);
@@ -768,7 +759,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
}
// Load the value from ReturnValue
- movq(rax, Operand(rbp, return_value_offset * kPointerSize));
+ movq(rax, return_value_operand);
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
@@ -783,6 +774,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
movq(rsi, scheduled_exception_address);
Cmp(Operand(rsi, 0), factory->the_hole_value());
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -819,11 +811,19 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ movq(rsi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -936,6 +936,42 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
}
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
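The xorps in both Cvtlsi2sd overloads is dependency-breaking rather than semantic: cvtsi2sd writes only the low lane of its destination, so without the zeroing the instruction inherits a false dependency on the register's stale upper bits and can stall. The same idiom with SSE2 intrinsics, for illustration:

#include <emmintrin.h>

__m128d ConvertInt32ToDoubleSketch(int value) {
  __m128d dst = _mm_setzero_pd();     // xorps: breaks the false dependency
  return _mm_cvtsi32_sd(dst, value);  // cvtsi2sd: fills only the low lane
}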
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsByte()) {
+ movzxbl(dst, src);
+ } else if (r.IsInteger32()) {
+ movl(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsByte()) {
+ movb(dst, src);
+ } else if (r.IsInteger32()) {
+ movl(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
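Load and Store derive the move width from the field's representation, so narrow fields are never read or written with a wider access. A usage sketch in the macro assembler's own vocabulary (offsets are illustrative; Representation's static constructors are assumed from the surrounding V8 sources):

void StoreWidthSketch(MacroAssembler* masm, Register object, Register value) {
  masm->Store(FieldOperand(object, 8), value, Representation::Byte());        // movb
  masm->Store(FieldOperand(object, 12), value, Representation::Integer32());  // movl
  masm->Store(FieldOperand(object, 16), value, Representation::Tagged());     // movq
}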
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
@@ -1423,28 +1459,6 @@ void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
}
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result, near_jump);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result, near_jump);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1513,10 +1527,14 @@ void MacroAssembler::SmiAddConstant(Register dst,
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
+ Label done;
LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
+ addq(dst, kScratchRegister);
+ j(no_overflow, &done, Label::kNear);
+ // Restore src.
+ subq(dst, kScratchRegister);
+ jmp(on_not_smi_result, near_jump);
+ bind(&done);
} else {
LoadSmiConstant(dst, constant);
addq(dst, src);
@@ -1616,6 +1634,29 @@ void MacroAssembler::SmiNeg(Register dst,
}
+template<class T>
+static void SmiAddHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (dst.is(src1)) {
+ Label done;
+ masm->addq(dst, src2);
+ masm->j(no_overflow, &done, Label::kNear);
+ // Restore src1.
+ masm->subq(dst, src2);
+ masm->jmp(on_not_smi_result, near_jump);
+ masm->bind(&done);
+ } else {
+ masm->movq(dst, src1);
+ masm->addq(dst, src2);
+ masm->j(overflow, on_not_smi_result, near_jump);
+ }
+}
+
+
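The add-then-undo trick in SmiAddHelper can be restated in plain C++; this is a hedged analogy, not V8 code. Because two's-complement addition wraps, subtracting the addend after an overflowing addq restores src1 exactly, which is what lets the in-place variant drop the scratch register while still keeping the caller's value intact on the bailout path:

    #include <cstdint>
    #include <limits>

    // Analogy to SmiAddHelper's dst-aliases-src1 case. The overflow test is
    // hoisted up front here to stay within defined C++ behavior; the
    // generated code instead adds, checks OF, and subtracts to roll back.
    bool checked_add_in_place(int64_t& dst, int64_t src2) {
      const int64_t kMax = std::numeric_limits<int64_t>::max();
      const int64_t kMin = std::numeric_limits<int64_t>::min();
      if ((src2 > 0 && dst > kMax - src2) || (src2 < 0 && dst < kMin - src2)) {
        return false;  // overflow: dst untouched, like on_not_smi_result
      }
      dst += src2;     // success: dst holds the sum, as after addq
      return true;
    }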
void MacroAssembler::SmiAdd(Register dst,
Register src1,
Register src2,
@@ -1623,16 +1664,7 @@ void MacroAssembler::SmiAdd(Register dst,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1642,17 +1674,8 @@ void MacroAssembler::SmiAdd(Register dst,
Label* on_not_smi_result,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- ASSERT(!src2.AddressUsesRegister(dst));
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ ASSERT(!src2.AddressUsesRegister(dst));
+ SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1675,34 +1698,37 @@ void MacroAssembler::SmiAdd(Register dst,
}
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
+template<class T>
+static void SmiSubHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- subq(dst, src2);
+ Label done;
+ masm->subq(dst, src2);
+ masm->j(no_overflow, &done, Label::kNear);
+ // Restore src1.
+ masm->addq(dst, src2);
+ masm->jmp(on_not_smi_result, near_jump);
+ masm->bind(&done);
} else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
+ masm->movq(dst, src1);
+ masm->subq(dst, src2);
+ masm->j(overflow, on_not_smi_result, near_jump);
}
}
-void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, kSmiSubtractionOverflow);
+ SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1712,29 +1738,36 @@ void MacroAssembler::SmiSub(Register dst,
Label* on_not_smi_result,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result, near_jump);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ ASSERT(!src2.AddressUsesRegister(dst));
+ SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2) {
+template<class T>
+static void SmiSubNoOverflowHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
if (!dst.is(src1)) {
- movq(dst, src1);
+ masm->movq(dst, src1);
}
- subq(dst, src2);
- Assert(no_overflow, kSmiSubtractionOverflow);
+ masm->subq(dst, src2);
+ masm->Assert(no_overflow, kSmiSubtractionOverflow);
+}
+
+
+void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
+ ASSERT(!dst.is(src2));
+ SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ const Operand& src2) {
+ SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}
@@ -2240,6 +2273,90 @@ void MacroAssembler::Test(const Operand& src, Smi* source) {
// ----------------------------------------------------------------------------
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+  // Register usage: result serves as a temporary until the final load.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shrl(mask, Immediate(1));
+ subq(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ isolate()->factory()->heap_number_map(),
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ and_(scratch, mask);
+  // Each entry in the number string cache consists of two pointer-sized
+  // fields, but the times_twice_pointer_size (multiply by 16) scale factor
+  // is not supported by x64 addressing modes, so the entry index must be
+  // premultiplied before the lookup.
+ shl(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ Register index = scratch;
+ Register probe = mask;
+ movq(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache);
+
+ bind(&is_smi);
+ SmiToInteger32(scratch, object);
+ and_(scratch, mask);
+  // Each entry in the number string cache consists of two pointer-sized
+  // fields, but the times_twice_pointer_size (multiply by 16) scale factor
+  // is not supported by x64 addressing modes, so the entry index must be
+  // premultiplied before the lookup.
+ shl(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ cmpq(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ movq(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
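For reference, the hash the lookup computes can be written out in standalone C++ (an assumption-flagged mirror of the comment's pointer to Heap::GetNumberStringCache, not the V8 source). The masked index is then shifted left by kPointerSizeLog2 + 1, i.e. multiplied by 16, because x64 addressing modes only scale up to 8:

    #include <cstdint>
    #include <cstring>

    // Doubles hash to the xor of their two 32-bit halves; smis hash to the
    // value itself. Each cache entry spans two slots (number, string), so
    // the mask is (cache length / 2) - 1.
    uint32_t NumberStringCacheHash(double value, uint32_t cache_length) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t hash = static_cast<uint32_t>(bits ^ (bits >> 32));
      return hash & (cache_length / 2 - 1);
    }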
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
@@ -2376,8 +2493,7 @@ void MacroAssembler::Move(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(dst, source);
}
}
@@ -2387,8 +2503,7 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
movq(dst, kScratchRegister);
}
}
@@ -2399,8 +2514,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
cmpq(dst, kScratchRegister);
}
}
@@ -2411,8 +2525,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
cmpq(dst, kScratchRegister);
}
}
@@ -2423,47 +2536,22 @@ void MacroAssembler::Push(Handle<Object> source) {
if (source->IsSmi()) {
Push(Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
push(kScratchRegister);
}
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
+void MacroAssembler::MoveHeapObject(Register result,
+ Handle<Object> object) {
AllowDeferredHandleDereference using_raw_address;
+ ASSERT(object->IsHeapObject());
if (isolate()->heap()->InNewSpace(*object)) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
movq(result, cell, RelocInfo::CELL);
movq(result, Operand(result, 0));
} else {
- Move(result, object);
- }
-}
-
-
-void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- movq(kScratchRegister, cell, RelocInfo::CELL);
- cmpq(reg, Operand(kScratchRegister, 0));
- } else {
- Cmp(reg, object);
- }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- movq(kScratchRegister, cell, RelocInfo::CELL);
- movq(kScratchRegister, Operand(kScratchRegister, 0));
- push(kScratchRegister);
- } else {
- Push(object);
+ movq(result, object, RelocInfo::EMBEDDED_OBJECT);
}
}
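The point of the cell indirection: new-space objects move under scavenges, so embedding a raw object pointer would leave stale code, while the address of a Cell stays fixed and the GC keeps the pointer stored inside it current. With everything funneled through MoveHeapObject, the former LoadObject/CmpObject call sites shrink to the plain macros; a usage sketch with hypothetical handles:

    masm->Move(rdi, function_handle);    // was LoadHeapObject / LoadObject
    masm->Cmp(rax, constant_handle);     // was CmpHeapObject / CmpObject
    masm->Push(boilerplate_handle);      // was PushHeapObject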
@@ -2548,7 +2636,8 @@ void MacroAssembler::Call(Handle<Code> code_object,
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+ rmode == RelocInfo::CODE_AGE_SEQUENCE);
call(code_object, rmode, ast_id);
#ifdef DEBUG
CHECK_EQ(end_position, pc_offset());
@@ -2651,7 +2740,8 @@ Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2710,7 +2800,8 @@ void MacroAssembler::JumpToHandlerEntry() {
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2750,7 +2841,8 @@ void MacroAssembler::Throw(Register value) {
void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2917,7 +3009,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
// Value is a smi. convert to a double and store.
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
- cvtlsi2sd(xmm_scratch, kScratchRegister);
+ Cvtlsi2sd(xmm_scratch, kScratchRegister);
movsd(FieldOperand(elements, index, times_8,
FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
@@ -3050,7 +3142,7 @@ void MacroAssembler::DoubleToI(Register result_reg,
Label* conversion_failed,
Label::Distance dst) {
cvttsd2si(result_reg, input_reg);
- cvtlsi2sd(xmm0, result_reg);
+ Cvtlsi2sd(xmm0, result_reg);
ucomisd(xmm0, input_reg);
j(not_equal, conversion_failed, dst);
j(parity_even, conversion_failed, dst); // NaN.
@@ -3087,7 +3179,7 @@ void MacroAssembler::TaggedToI(Register result_reg,
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, xmm0);
- cvtlsi2sd(temp, result_reg);
+ Cvtlsi2sd(temp, result_reg);
ucomisd(xmm0, temp);
RecordComment("Deferred TaggedToI: lost precision");
j(not_equal, lost_precision, dst);
@@ -3472,7 +3564,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- LoadHeapObject(rdi, function);
+ Move(rdi, function);
movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
@@ -3559,6 +3651,30 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ push(rbp); // Caller's frame pointer.
+ movq(rbp, rsp);
+ push(rsi); // Callee's context.
+ Push(Smi::FromInt(StackFrame::STUB));
+ } else {
+    PredictableCodeSizeScope predictable_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
+ } else {
+ push(rbp); // Caller's frame pointer.
+ movq(rbp, rsp);
+ push(rsi); // Callee's context.
+ push(rdi); // Callee's JS function.
+ }
+ }
+}
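The pre-aged branch has to occupy exactly kNoCodeAgeSequenceLength bytes so the runtime can later patch the sequence in place; the Nop pads whatever the short call leaves over. A non-standalone sketch of the size bookkeeping (names as in the diff):

    const int kPad =
        kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength;
    masm->Call(isolate->builtins()->MarkCodeAsExecutedOnce(),
               RelocInfo::CODE_AGE_SEQUENCE);
    masm->Nop(kPad);  // keep the prologue patchable at a fixed length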
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(rbp);
movq(rbp, rsp);
@@ -3590,9 +3706,10 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
// Set up the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
+ kFPOnStackSize + kPCOnStackSize);
+ ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
+ ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(rbp);
movq(rbp, rsp);
@@ -3620,7 +3737,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
+ int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
arg_stack_space * kPointerSize;
subq(rsp, Immediate(space));
int offset = -2 * kPointerSize;
@@ -3683,23 +3800,25 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
PushReturnAddressFrom(rcx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
movq(rsp, rbp);
pop(rbp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
Operand context_operand = ExternalOperand(context_address);
- movq(rsi, context_operand);
+ if (restore_context) {
+ movq(rsi, context_operand);
+ }
#ifdef DEBUG
movq(context_operand, Immediate(0));
#endif
@@ -3971,6 +4090,10 @@ void MacroAssembler::Allocate(int object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ if (isolate()->heap_profiler()->is_tracking_allocations()) {
+ RecordObjectAllocation(isolate(), result, object_size);
+ }
+
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
@@ -4050,6 +4173,10 @@ void MacroAssembler::Allocate(Register object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ if (isolate()->heap_profiler()->is_tracking_allocations()) {
+ RecordObjectAllocation(isolate(), result, object_size);
+ }
+
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
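One subtlety in both Allocate() hunks: is_tracking_allocations() is evaluated while this code is being generated, not when it runs, so the profiler call-out is baked into (or omitted from) each compiled stub. A hedged restatement:

    // Codegen-time decision (sketch): toggling tracking later does not add
    // or remove the call-out from code that has already been compiled.
    if (isolate()->heap_profiler()->is_tracking_allocations()) {
      RecordObjectAllocation(isolate(), result, object_size);
    }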
@@ -4791,8 +4918,8 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
- Register scratch_reg) {
- Label no_memento_available;
+ Register scratch_reg,
+ Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
@@ -4802,12 +4929,43 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
movq(kScratchRegister, new_space_start);
cmpq(scratch_reg, kScratchRegister);
- j(less, &no_memento_available);
+ j(less, no_memento_found);
cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
- j(greater, &no_memento_available);
+ j(greater, no_memento_found);
CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
Heap::kAllocationMementoMapRootIndex);
- bind(&no_memento_available);
+}
+
+
+void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ Register object_size) {
+ FrameScope frame(this, StackFrame::EXIT);
+ PushSafepointRegisters();
+ PrepareCallCFunction(3);
+  // object may already live in an argument register (e.g. rdx), so stage
+  // it through kScratchRegister before the argument registers are written.
+ movq(kScratchRegister, object);
+ movq(arg_reg_3, object_size);
+ movq(arg_reg_2, kScratchRegister);
+ movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
+ CallCFunction(
+ ExternalReference::record_object_allocation_function(isolate), 3);
+ PopSafepointRegisters();
+}
+
+
+void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ int object_size) {
+ FrameScope frame(this, StackFrame::EXIT);
+ PushSafepointRegisters();
+ PrepareCallCFunction(3);
+ movq(arg_reg_2, object);
+ movq(arg_reg_3, Immediate(object_size));
+ movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
+ CallCFunction(
+ ExternalReference::record_object_allocation_function(isolate), 3);
+ PopSafepointRegisters();
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 09c8a800cc..24374349a2 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -282,6 +282,9 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
@@ -302,7 +305,7 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects/provides the return value in
// register rax (untouched).
- void LeaveApiExitFrame();
+ void LeaveApiExitFrame(bool restore_context);
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { Pushad(); }
@@ -532,15 +535,6 @@ class MacroAssembler: public Assembler {
// Smis represent a subset of integers. The subset is always equivalent to
// a two's complement interpretation of a fixed number of bits.
- // Optimistically adds an integer constant to a supposed smi.
- // If the src is not a smi, or the result is not a smi, jump to
- // the label.
- void SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
void SmiAddConstant(Register dst, Register src, Smi* constant);
@@ -578,8 +572,8 @@ class MacroAssembler: public Assembler {
Label::Distance near_jump = Label::kFar);
  // Adds smi values and returns the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
+  // If dst is src1, src1 is destroyed only when the operation succeeds;
+  // on the bailout path it is left intact.
void SmiAdd(Register dst,
Register src1,
Register src2,
@@ -596,18 +590,13 @@ class MacroAssembler: public Assembler {
Register src2);
  // Subtracts smi values and returns the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
+  // If dst is src1, src1 is destroyed only when the operation succeeds;
+  // on the bailout path it is left intact.
void SmiSub(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump = Label::kFar);
-
- void SmiSub(Register dst,
- Register src1,
- Register src2);
-
void SmiSub(Register dst,
Register src1,
const Operand& src2,
@@ -616,6 +605,10 @@ class MacroAssembler: public Assembler {
void SmiSub(Register dst,
Register src1,
+ Register src2);
+
+ void SmiSub(Register dst,
+ Register src1,
const Operand& src2);
// Multiplies smi values and return the result as a smi,
@@ -739,6 +732,17 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String macros.
+  // Generate code to do a lookup in the number string cache. If the number
+  // in the object register is found in the cache, the generated code falls
+  // through with the result in the result register. The object and result
+  // registers may be the same. If the number is not found, the code jumps
+  // to the not_found label, leaving only the object register unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
// If object is a string, its map is loaded into object_map.
void JumpIfNotString(Register object,
Register object_map,
@@ -780,10 +784,20 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Macro instructions.
+ // Load/store with specific representation.
+ void Load(Register dst, const Operand& src, Representation r);
+ void Store(const Operand& dst, Register src, Representation r);
+
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
void Set(const Operand& dst, int64_t x);
+  // The cvtsi2sd instruction writes only the low 64 bits of its destination
+  // register, which hinders register renaming and lengthens dependence
+  // chains. Clearing dst with xorps before cvtsi2sd breaks that false
+  // dependence on the register's previous contents.
+ void Cvtlsi2sd(XMMRegister dst, Register src);
+ void Cvtlsi2sd(XMMRegister dst, const Operand& src);
+
// Move if the registers are not identical.
void Move(Register target, Register source);
@@ -801,27 +815,7 @@ class MacroAssembler: public Assembler {
// Load a heap object and handle the case of new-space objects by
// indirecting via a global cell.
- void LoadHeapObject(Register result, Handle<HeapObject> object);
- void CmpHeapObject(Register reg, Handle<HeapObject> object);
- void PushHeapObject(Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, object);
- }
- }
-
- void CmpObject(Register reg, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- CmpHeapObject(reg, Handle<HeapObject>::cast(object));
- } else {
- Cmp(reg, object);
- }
- }
+ void MoveHeapObject(Register result, Handle<Object> object);
// Load a global cell into a register.
void LoadGlobalCell(Register dst, Handle<Cell> cell);
@@ -835,6 +829,8 @@ class MacroAssembler: public Assembler {
void Pop(Register dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
+ void MoveDouble(Register dst, const Operand& src) { movq(dst, src); }
+ void MoveDouble(const Operand& dst, Register src) { movq(dst, src); }
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
@@ -1104,6 +1100,15 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
+  // Record a JS object allocation if allocation tracking is on.
+ void RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ Register object_size);
+
+ void RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ int object_size);
+
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
// object(s) no longer allocated as they would be invalid when allocation is
@@ -1232,13 +1237,20 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Call a runtime function and save the value of XMM registers.
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ }
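Both conveniences now bottom out in the single three-argument CallRuntime, so the XMM-save behavior is a per-call-site choice rather than a separate code path. A usage sketch (Runtime::kStackGuard assumed to exist in this tree):

    masm->CallRuntime(Runtime::kStackGuard, 0);          // default kDontSaveFPRegs
    masm->CallRuntimeSaveDoubles(Runtime::kStackGuard);  // forwards kSaveFPRegs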
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1274,7 +1286,8 @@ class MacroAssembler: public Assembler {
Address thunk_address,
Register thunk_last_arg,
int stack_space,
- int return_value_offset_from_rbp);
+ Operand return_value_operand,
+ Operand* context_restore_operand);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in rsp[0], rsp[8],
@@ -1384,9 +1397,20 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
- // If allocation info is present, condition flags are set to equal
+ // If allocation info is present, condition flags are set to equal.
void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg);
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ j(equal, memento_found);
+ bind(&no_memento_found);
+ }
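The refactored tester now leaves the branch decision to the caller: TestJSArrayForAllocationMemento only sets the flags and jumps to no_memento_found for the out-of-new-space cases, while this inline wrapper converts that into the opposite polarity. Usage sketch (register choices hypothetical):

    Label memento_found;
    masm->JumpIfJSArrayHasAllocationMemento(rdx, rdi, &memento_found);
    // ... fast path; it returns or jumps away before falling through ...
    masm->bind(&memento_found);
    // ... slow path: defer to the runtime ...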
private:
// Order general registers are pushed by Pushad.
@@ -1430,7 +1454,7 @@ class MacroAssembler: public Assembler {
// accessible via StackSpaceOperand.
void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
- void LeaveExitFrameEpilogue();
+ void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
// Loads the top of new-space into the result register.
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 95276d530d..2a0c3675f2 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -304,32 +304,28 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length directly from the string.
__ movq(rax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
}
@@ -447,88 +443,96 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
- // -- rsp[8] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- rsp[16] : api function
- // (first fast api call extra argument)
- // -- rsp[24] : api call data
- // -- rsp[32] : isolate
- // -- rsp[40] : ReturnValue default value
- // -- rsp[48] : ReturnValue
- //
- // -- rsp[56] : last argument
+ // -- rsp[8] - rsp[56] : FunctionCallbackInfo, incl.
+ // : object passing the type check
+ // (set by CheckPrototypes)
+ // -- rsp[64] : last argument
// -- ...
- // -- rsp[(argc + 6) * 8] : first argument
- // -- rsp[(argc + 7) * 8] : receiver
+ // -- rsp[(argc + 7) * 8] : first argument
+ // -- rsp[(argc + 8) * 8] : receiver
// -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+ StackArgumentsAccessor args(rsp, argc + kFastApiCallArguments);
+
+ // Save calling context.
+ int offset = argc + kFastApiCallArguments;
+ __ movq(args.GetArgumentOperand(offset - FCA::kContextSaveIndex), rsi);
+
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(rdi, function);
+ __ Move(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- int api_call_argc = argc + kFastApiCallArguments;
- StackArgumentsAccessor args(rsp, api_call_argc);
-
- // Pass the additional arguments.
- __ movq(args.GetArgumentOperand(api_call_argc - 1), rdi);
+ // Construct the FunctionCallbackInfo on the stack.
+ __ movq(args.GetArgumentOperand(offset - FCA::kCalleeIndex), rdi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ Move(rcx, api_call_info);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(args.GetArgumentOperand(api_call_argc - 2), rbx);
+ __ movq(args.GetArgumentOperand(offset - FCA::kDataIndex), rbx);
} else {
- __ Move(args.GetArgumentOperand(api_call_argc - 2), call_data);
+ __ Move(args.GetArgumentOperand(offset - FCA::kDataIndex), call_data);
}
__ movq(kScratchRegister,
ExternalReference::isolate_address(masm->isolate()));
- __ movq(args.GetArgumentOperand(api_call_argc - 3), kScratchRegister);
+ __ movq(args.GetArgumentOperand(offset - FCA::kIsolateIndex),
+ kScratchRegister);
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetArgumentOperand(api_call_argc - 4), kScratchRegister);
- __ movq(args.GetArgumentOperand(api_call_argc - 5), kScratchRegister);
+ __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueDefaultValueIndex),
+ kScratchRegister);
+ __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueOffset),
+ kScratchRegister);
// Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 6);
- __ lea(rbx, Operand(rsp, kFastApiCallArguments * kPointerSize));
+ STATIC_ASSERT(kFastApiCallArguments == 7);
+ __ lea(rbx, Operand(rsp, 1 * kPointerSize));
// Function address is a foreign pointer outside V8's heap.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
-#if defined(__MINGW64__) || defined(_WIN64)
- Register arguments_arg = rcx;
- Register callback_arg = rdx;
-#else
- Register arguments_arg = rdi;
- Register callback_arg = rsi;
-#endif
-
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
const int kApiStackSpace = 4;
__ PrepareCallApiFunction(kApiStackSpace);
- __ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_.
- __ addq(rbx, Immediate(argc * kPointerSize));
- __ movq(StackSpaceOperand(1), rbx); // v8::Arguments::values_.
- __ Set(StackSpaceOperand(2), argc); // v8::Arguments::length_.
- // v8::Arguments::is_construct_call_.
+ __ movq(StackSpaceOperand(0), rbx); // FunctionCallbackInfo::implicit_args_.
+ __ addq(rbx, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
+ __ movq(StackSpaceOperand(1), rbx); // FunctionCallbackInfo::values_.
+ __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
+ // FunctionCallbackInfo::is_construct_call_.
__ Set(StackSpaceOperand(3), 0);
+#if defined(__MINGW64__) || defined(_WIN64)
+ Register arguments_arg = rcx;
+ Register callback_arg = rdx;
+#else
+ Register arguments_arg = rdi;
+ Register callback_arg = rsi;
+#endif
+
// v8::InvocationCallback's argument.
__ lea(arguments_arg, StackSpaceOperand(0));
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- __ CallApiFunctionAndReturn(function_address,
- thunk_address,
- callback_arg,
- api_call_argc + 1,
- kFastApiCallArguments + 1);
+ StackArgumentsAccessor args_from_rbp(rbp, kFastApiCallArguments,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
+ kFastApiCallArguments - 1 - FCA::kContextSaveIndex);
+ Operand return_value_operand = args_from_rbp.GetArgumentOperand(
+ kFastApiCallArguments - 1 - FCA::kReturnValueOffset);
+ __ CallApiFunctionAndReturn(
+ function_address,
+ thunk_address,
+ callback_arg,
+ argc + kFastApiCallArguments + 1,
+ return_value_operand,
+ restore_context ? &context_restore_operand : NULL);
}
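The slot arithmetic above is easy to lose track of; this standalone mirror is an assumption (index values inferred from the kFastApiCallArguments == 7 assert and the analogous PropertyCallbackArguments asserts later in this file), not the V8 header:

    #include <cstdio>

    enum FunctionCallbackSlots {  // assumed layout
      kHolderIndex = 0,
      kIsolateIndex = 1,
      kReturnValueDefaultValueIndex = 2,
      kReturnValueOffset = 3,
      kDataIndex = 4,
      kCalleeIndex = 5,
      kContextSaveIndex = 6,
      kArgsLength = 7,
    };

    int main() {
      const int argc = 2;                     // JS arguments in this example
      const int offset = argc + kArgsLength;  // as in the code above
      // GetArgumentOperand(offset - index) addresses slot `index`:
      std::printf("context save -> arg %d\n", offset - kContextSaveIndex);  // 3
      std::printf("callee       -> arg %d\n", offset - kCalleeIndex);       // 4
      std::printf("return value -> arg %d\n", offset - kReturnValueOffset); // 6
      return 0;
    }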
@@ -542,26 +546,26 @@ static void GenerateFastApiCall(MacroAssembler* masm,
ASSERT(optimization.is_simple_api_call());
ASSERT(!receiver.is(scratch));
- const int stack_space = kFastApiCallArguments + argc + 1;
- // Copy return value.
- __ movq(scratch, Operand(rsp, 0));
- // Assign stack space for the call arguments.
- __ subq(rsp, Immediate(stack_space * kPointerSize));
- // Move the return address on top of the stack.
- __ movq(Operand(rsp, 0), scratch);
+ const int fast_api_call_argc = argc + kFastApiCallArguments;
+ StackArgumentsAccessor args(rsp, fast_api_call_argc);
+  // Beyond the kFastApiCallArguments slots, the stack holds argc + 1 more
+  // entries: the JS arguments plus the receiver (the "+ 1").
+ const int kHolderIndex = argc + 1 +
+ kFastApiCallArguments - 1 - FunctionCallbackArguments::kHolderIndex;
+ __ movq(scratch, StackOperandForReturnAddress(0));
+ // Assign stack space for the call arguments and receiver.
+ __ subq(rsp, Immediate((fast_api_call_argc + 1) * kPointerSize));
+ __ movq(StackOperandForReturnAddress(0), scratch);
// Write holder to stack frame.
- __ movq(Operand(rsp, 1 * kPointerSize), receiver);
- // Write receiver to stack frame.
- int index = stack_space;
- __ movq(Operand(rsp, index-- * kPointerSize), receiver);
+ __ movq(args.GetArgumentOperand(kHolderIndex), receiver);
+ __ movq(args.GetReceiverOperand(), receiver);
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
ASSERT(!receiver.is(values[i]));
ASSERT(!scratch.is(values[i]));
- __ movq(Operand(rsp, index-- * kPointerSize), values[i]);
+ __ movq(args.GetArgumentOperand(i + 1), values[i]);
}
- GenerateFastApiCall(masm, optimization, argc);
+ GenerateFastApiCall(masm, optimization, argc, true);
}
@@ -675,7 +679,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiCall(masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -763,9 +767,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
-void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ Move(this->name(), name);
@@ -791,7 +795,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -809,19 +813,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
// store is successful.
-void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register unused,
- Label* miss_label,
- Label* slow) {
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
@@ -830,7 +834,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
- __ CmpObject(value_reg, constant);
+ __ Cmp(value_reg, constant);
__ j(not_equal, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
@@ -842,7 +846,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiToInteger32(scratch1, value_reg);
- __ cvtlsi2sd(xmm0, scratch1);
+ __ Cvtlsi2sd(xmm0, scratch1);
__ jmp(&do_store);
__ bind(&heap_number);
@@ -954,15 +958,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
-void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -996,7 +1000,7 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Label do_store, heap_number;
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiToInteger32(scratch2, value_reg);
- __ cvtlsi2sd(xmm0, scratch2);
+ __ Cvtlsi2sd(xmm0, scratch2);
__ jmp(&do_store);
__ bind(&heap_number);
@@ -1107,8 +1111,13 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
+ StackArgumentsAccessor args(rsp, kFastApiCallArguments,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ const int kHolderIndex = kFastApiCallArguments - 1 -
+ FunctionCallbackArguments::kHolderIndex;
+
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPCOnStackSize), object_reg);
+ __ movq(args.GetArgumentOperand(kHolderIndex), object_reg);
}
// Check the maps in the prototype chain.
@@ -1168,7 +1177,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPCOnStackSize), reg);
+ __ movq(args.GetArgumentOperand(kHolderIndex), reg);
}
// Go to the next object in the prototype chain.
@@ -1200,9 +1209,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
if (!miss->is_unused()) {
__ jmp(success);
__ bind(miss);
@@ -1211,9 +1220,9 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
-void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
if (!miss->is_unused()) {
__ jmp(success);
GenerateRestoreName(masm(), miss, name);
@@ -1222,7 +1231,7 @@ void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
+Register LoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -1273,7 +1282,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
}
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
+void LoadStubCompiler::NonexistentHandlerFrontend(
Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
@@ -1293,7 +1302,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
}
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
+void LoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex field,
Representation representation) {
@@ -1312,26 +1321,27 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 0, NULL);
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch4().is(reg));
__ PopReturnAddressTo(scratch4());
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
__ push(receiver()); // receiver
if (heap()->InNewSpace(callback->data())) {
ASSERT(!scratch2().is(reg));
@@ -1349,7 +1359,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ push(reg); // holder
__ push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const ExecutableAccessorInfo& to the C++ callback.
+ // passed as the const PropertyAccessorInfo& to the C++ callback.
Address getter_address = v8::ToCData<Address>(callback->getter());
@@ -1374,10 +1384,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
const int kArgStackSpace = 1;
__ PrepareCallApiFunction(kArgStackSpace);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- __ lea(rax, Operand(name_arg, 6 * kPointerSize));
+ __ lea(rax, Operand(name_arg, 1 * kPointerSize));
- // v8::AccessorInfo::args_.
+ // v8::PropertyAccessorInfo::args_.
__ movq(StackSpaceOperand(0), rax);
// The context register (rsi) has been saved in PrepareCallApiFunction and
@@ -1386,22 +1395,28 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+  // The name handle is counted as an argument.
+ StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
+ Operand return_value_operand = args.GetArgumentOperand(
+ PropertyCallbackArguments::kArgsLength - 1 -
+ PropertyCallbackArguments::kReturnValueOffset);
__ CallApiFunctionAndReturn(getter_address,
thunk_address,
getter_arg,
kStackSpace,
- 6);
+ return_value_operand,
+ NULL);
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadObject(rax, value);
+ __ Move(rax, value);
__ ret(0);
}
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
+void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
Handle<JSObject> object,
Handle<JSObject> interceptor_holder,
@@ -2183,7 +2198,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- __ movq(rdx, args.GetArgumentOperand(argc - 1));
+ __ movq(rdx, args.GetReceiverOperand());
__ JumpIfSmi(rdx, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
name, &miss);
@@ -2196,7 +2211,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// Load the char code argument.
Register code = rbx;
- __ movq(code, args.GetArgumentOperand(argc));
+ __ movq(code, args.GetArgumentOperand(1));
// Check the code is a smi.
Label slow;
@@ -2246,6 +2261,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- rsp[(argc + 1) * 4] : receiver
// -----------------------------------
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -2257,7 +2273,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(rdx, &miss);
@@ -2272,7 +2288,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
}
// Load the (only) argument into rax.
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rax, args.GetArgumentOperand(1));
// Check if the argument is a smi.
Label smi;
@@ -2339,7 +2355,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Return the argument (when it's an already round heap number).
__ bind(&already_round);
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rax, args.GetArgumentOperand(1));
__ ret(2 * kPointerSize);
// Tail call the full function. We do not have to patch the receiver
@@ -2383,7 +2399,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- __ movq(rdx, args.GetArgumentOperand(argc - 1));
+ __ movq(rdx, args.GetReceiverOperand());
__ JumpIfSmi(rdx, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
name, &miss);
@@ -2394,7 +2410,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into rax.
- __ movq(rax, args.GetArgumentOperand(argc));
+ __ movq(rax, args.GetArgumentOperand(1));
// Check if the argument is a smi.
Label not_smi;
@@ -2424,7 +2440,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// Check if the argument is a heap number and load its value.
__ bind(&not_smi);
__ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
// Check the sign of the argument. If the argument is positive,
// just return it.
@@ -2442,7 +2458,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ bind(&negative_sign);
__ xor_(rbx, rdi);
__ AllocateHeapNumber(rax, rdx, &slow);
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
+ __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
__ ret(2 * kPointerSize);
// Tail call the full function. We do not have to patch the receiver
@@ -2508,7 +2524,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize));
__ movq(StackOperandForReturnAddress(0), rax);
- GenerateFastApiCall(masm(), optimization, argc);
+ GenerateFastApiCall(masm(), optimization, argc, false);
__ bind(&miss);
__ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
@@ -3006,6 +3022,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -3017,7 +3034,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(rax);
+ __ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index 651b8d7ad1..103eaeb126 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -25,5 +25,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# Too slow in Debug mode.
-octane/mandreel: PASS, SKIP if $mode == debug
+[
+['mode == debug', {
+ # Too slow in Debug mode.
+ 'octane/mandreel': [SKIP],
+}], # 'mode == debug'
+]
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index 5fb3f51c75..b15553a861 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -172,7 +172,7 @@ class BenchmarksTestSuite(testsuite.TestSuite):
os.chdir(old_cwd)
- def VariantFlags(self):
+ def VariantFlags(self, testcase, default_flags):
# Both --nocrankshaft and --stressopt are very slow.
return [[]]
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 616c6a3a6b..4aa9c7eb71 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -29,13 +29,20 @@
#include "cctest.h"
#include "debug.h"
+enum InitializationState {kUnset, kUninitialized, kInitialized};
+static InitializationState initialization_state_ = kUnset;
+static bool disable_automatic_dispose_ = false;
CcTest* CcTest::last_ = NULL;
+bool CcTest::initialize_called_ = false;
+bool CcTest::isolate_used_ = false;
+v8::Isolate* CcTest::isolate_ = NULL;
CcTest::CcTest(TestFunction* callback, const char* file, const char* name,
- const char* dependency, bool enabled)
- : callback_(callback), name_(name), dependency_(dependency), prev_(last_) {
+ const char* dependency, bool enabled, bool initialize)
+ : callback_(callback), name_(name), dependency_(dependency),
+ enabled_(enabled), initialize_(initialize), prev_(last_) {
// Find the base name of this test (const_cast required on Windows).
char *basename = strrchr(const_cast<char *>(file), '/');
if (!basename) {
@@ -51,35 +58,49 @@ CcTest::CcTest(TestFunction* callback, const char* file, const char* name,
if (extension) *extension = 0;
// Install this test in the list of tests
file_ = basename;
- enabled_ = enabled;
prev_ = last_;
last_ = this;
}
-v8::Persistent<v8::Context> CcTest::context_;
+void CcTest::Run() {
+ if (!initialize_) {
+ CHECK(initialization_state_ != kInitialized);
+    initialization_state_ = kUninitialized;
+ CHECK(CcTest::isolate_ == NULL);
+ } else {
+    CHECK(initialization_state_ != kUninitialized);
+ initialization_state_ = kInitialized;
+ if (isolate_ == NULL) {
+ isolate_ = v8::Isolate::New();
+ }
+ isolate_->Enter();
+ }
+ callback_();
+ if (initialize_) {
+ isolate_->Exit();
+ }
+}
-void CcTest::InitializeVM(CcTestExtensionFlags extensions) {
- const char* extension_names[kMaxExtensions];
- int extension_count = 0;
-#define CHECK_EXTENSION_FLAG(Name, Id) \
- if (extensions.Contains(Name##_ID)) extension_names[extension_count++] = Id;
- EXTENSION_LIST(CHECK_EXTENSION_FLAG)
-#undef CHECK_EXTENSION_FLAG
- v8::Isolate* isolate = default_isolate();
- if (context_.IsEmpty()) {
- v8::HandleScope scope(isolate);
+v8::Local<v8::Context> CcTest::NewContext(CcTestExtensionFlags extensions,
+ v8::Isolate* isolate) {
+ const char* extension_names[kMaxExtensions];
+ int extension_count = 0;
+ #define CHECK_EXTENSION_FLAG(Name, Id) \
+ if (extensions.Contains(Name##_ID)) extension_names[extension_count++] = Id;
+ EXTENSION_LIST(CHECK_EXTENSION_FLAG)
+ #undef CHECK_EXTENSION_FLAG
v8::ExtensionConfiguration config(extension_count, extension_names);
v8::Local<v8::Context> context = v8::Context::New(isolate, &config);
- context_.Reset(isolate, context);
- }
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context =
- v8::Local<v8::Context>::New(isolate, context_);
- context->Enter();
- }
+ CHECK(!context.IsEmpty());
+ return context;
+}
+
+
+void CcTest::DisableAutomaticDispose() {
+  CHECK_EQ(kUninitialized, initialization_state_);
+ disable_automatic_dispose_ = true;
}
@@ -95,9 +116,6 @@ static void PrintTestList(CcTest* current) {
}
-v8::Isolate* CcTest::default_isolate_;
-
-
class CcTestArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
virtual void* Allocate(size_t length) { return malloc(length); }
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
@@ -115,13 +133,14 @@ static void SuggestTestHarness(int tests) {
int main(int argc, char* argv[]) {
+ v8::V8::InitializeICU();
+ i::Isolate::SetCrashIfDefaultIsolateInitialized();
+
v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
CcTestArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
- CcTest::set_default_isolate(v8::Isolate::GetCurrent());
- CHECK(CcTest::default_isolate() != NULL);
int tests_run = 0;
bool print_run_count = true;
for (int i = 1; i < argc; i++) {
@@ -169,7 +188,7 @@ int main(int argc, char* argv[]) {
}
if (print_run_count && tests_run != 1)
printf("Ran %i tests.\n", tests_run);
- v8::V8::Dispose();
+ if (!disable_automatic_dispose_) v8::V8::Dispose();
return 0;
}
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index ee7ffad6d3..fbe38f2709 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -56,6 +56,7 @@
'test-circular-queue.cc',
'test-compiler.cc',
'test-condition-variable.cc',
+ 'test-constantpool.cc',
'test-conversions.cc',
'test-cpu.cc',
'test-cpu-profiler.cc',
@@ -139,13 +140,15 @@
'test-assembler-arm.cc',
'test-code-stubs.cc',
'test-code-stubs-arm.cc',
- 'test-disasm-arm.cc'
+ 'test-disasm-arm.cc',
+ 'test-macro-assembler-arm.cc'
],
}],
['v8_target_arch=="mipsel"', {
'sources': [
'test-assembler-mips.cc',
'test-disasm-mips.cc',
+ 'test-macro-assembler-mips.cc'
],
}],
[ 'OS=="linux"', {
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index ceb97743eb..7f84c259f0 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -31,23 +31,30 @@
#include "v8.h"
#ifndef TEST
-#define TEST(Name) \
- static void Test##Name(); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, NULL, true); \
+#define TEST(Name) \
+ static void Test##Name(); \
+ CcTest register_test_##Name(Test##Name, __FILE__, #Name, NULL, true, true); \
+ static void Test##Name()
+#endif
+
+#ifndef UNINITIALIZED_TEST
+#define UNINITIALIZED_TEST(Name) \
+ static void Test##Name(); \
+ CcTest register_test_##Name(Test##Name, __FILE__, #Name, NULL, true, false); \
static void Test##Name()
#endif
#ifndef DEPENDENT_TEST
-#define DEPENDENT_TEST(Name, Dep) \
- static void Test##Name(); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, #Dep, true); \
+#define DEPENDENT_TEST(Name, Dep) \
+ static void Test##Name(); \
+ CcTest register_test_##Name(Test##Name, __FILE__, #Name, #Dep, true, true); \
static void Test##Name()
#endif
#ifndef DISABLED_TEST
-#define DISABLED_TEST(Name) \
- static void Test##Name(); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, NULL, false); \
+#define DISABLED_TEST(Name) \
+ static void Test##Name(); \
+ CcTest register_test_##Name(Test##Name, __FILE__, #Name, NULL, false, true); \
static void Test##Name()
#endif
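
Each registration macro now threads an extra initialize flag into the CcTest constructor. For instance, TEST(Foo) expands to roughly:

    static void TestFoo();
    CcTest register_test_Foo(TestFoo, __FILE__, "Foo", NULL, true, true);
    static void TestFoo() { /* the body written after the macro */ }

UNINITIALIZED_TEST differs only in passing false for initialize, and DISABLED_TEST in passing false for enabled.
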
@@ -71,51 +78,70 @@ typedef v8::internal::EnumSet<CcTestExtensionIds> CcTestExtensionFlags;
EXTENSION_LIST(DEFINE_EXTENSION_FLAG)
#undef DEFINE_EXTENSION_FLAG
-// Temporary macros for accessing current isolate and its subobjects.
-// They provide better readability, especially when used a lot in the code.
-#define HEAP (v8::internal::Isolate::Current()->heap())
class CcTest {
public:
typedef void (TestFunction)();
CcTest(TestFunction* callback, const char* file, const char* name,
- const char* dependency, bool enabled);
- void Run() { callback_(); }
+ const char* dependency, bool enabled, bool initialize);
+ void Run();
static CcTest* last() { return last_; }
CcTest* prev() { return prev_; }
const char* file() { return file_; }
const char* name() { return name_; }
const char* dependency() { return dependency_; }
bool enabled() { return enabled_; }
- static v8::Isolate* default_isolate() { return default_isolate_; }
- static v8::Handle<v8::Context> env() {
- return v8::Local<v8::Context>::New(default_isolate_, context_);
+ static v8::Isolate* isolate() {
+ CHECK(isolate_ != NULL);
+ isolate_used_ = true;
+ return isolate_;
}
- static v8::Isolate* isolate() { return default_isolate_; }
-
static i::Isolate* i_isolate() {
- return reinterpret_cast<i::Isolate*>(default_isolate_);
+ return reinterpret_cast<i::Isolate*>(isolate());
+ }
+
+ static i::Heap* heap() {
+ return i_isolate()->heap();
+ }
+
+ static v8::Local<v8::Object> global() {
+ return isolate()->GetCurrentContext()->Global();
+ }
+
+ // TODO(dcarney): Remove.
+ // This must be called first in a test.
+ static void InitializeVM() {
+ CHECK(!isolate_used_);
+ CHECK(!initialize_called_);
+ initialize_called_ = true;
+ v8::HandleScope handle_scope(CcTest::isolate());
+ v8::Context::New(CcTest::isolate())->Enter();
}
- // Helper function to initialize the VM.
- static void InitializeVM(CcTestExtensionFlags extensions = NO_EXTENSIONS);
+ // Only for UNINITIALIZED_TESTs
+ static void DisableAutomaticDispose();
+
+ // Helper function to configure a context.
+ // Must be called inside a HandleScope.
+ static v8::Local<v8::Context> NewContext(
+ CcTestExtensionFlags extensions,
+ v8::Isolate* isolate = CcTest::isolate());
private:
friend int main(int argc, char** argv);
- static void set_default_isolate(v8::Isolate* default_isolate) {
- default_isolate_ = default_isolate;
- }
TestFunction* callback_;
const char* file_;
const char* name_;
const char* dependency_;
bool enabled_;
+ bool initialize_;
CcTest* prev_;
static CcTest* last_;
- static v8::Isolate* default_isolate_;
- static v8::Persistent<v8::Context> context_;
+ static v8::Isolate* isolate_;
+ static bool initialize_called_;
+ static bool isolate_used_;
};
// Switches between all the Api tests using the threading support.
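
Most tests now opt into the shared VM explicitly and reach the heap through the new accessors rather than the removed HEAP macro. A minimal sketch using only members declared above:

    TEST(TouchesTheHeap) {
      CcTest::InitializeVM();  // must be the first call in the test
      v8::HandleScope scope(CcTest::isolate());
      CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
      CHECK(!CcTest::global().IsEmpty());
    }
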
@@ -211,20 +237,19 @@ class RegisterThreadedTest {
// A LocalContext holds a reference to a v8::Context.
class LocalContext {
public:
+ LocalContext(v8::Isolate* isolate,
+ v8::ExtensionConfiguration* extensions = 0,
+ v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Handle<v8::ObjectTemplate>(),
+ v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>()) {
+ Initialize(isolate, extensions, global_template, global_object);
+ }
+
LocalContext(v8::ExtensionConfiguration* extensions = 0,
v8::Handle<v8::ObjectTemplate> global_template =
v8::Handle<v8::ObjectTemplate>(),
v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>()) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate,
- extensions,
- global_template,
- global_object);
- context_.Reset(isolate, context);
- context->Enter();
- // We can't do this later perhaps because of a fatal error.
- isolate_ = context->GetIsolate();
+ Initialize(CcTest::isolate(), extensions, global_template, global_object);
}
virtual ~LocalContext() {
@@ -244,6 +269,21 @@ class LocalContext {
}
private:
+ void Initialize(v8::Isolate* isolate,
+ v8::ExtensionConfiguration* extensions,
+ v8::Handle<v8::ObjectTemplate> global_template,
+ v8::Handle<v8::Value> global_object) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate,
+ extensions,
+ global_template,
+ global_object);
+ context_.Reset(isolate, context);
+ context->Enter();
+ // Cache the isolate now; a later fatal error could prevent it.
+ isolate_ = isolate;
+ }
+
v8::Persistent<v8::Context> context_;
v8::Isolate* isolate_;
};
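
Both constructors now funnel through Initialize(), so a test either inherits the shared CcTest isolate or supplies its own (useful for UNINITIALIZED_TESTs). A sketch, with v8_str/v8_num being the usual cctest helpers:

    {
      LocalContext env;  // defaults to CcTest::isolate()
      v8::HandleScope scope(env->GetIsolate());
      env->Global()->Set(v8_str("x"), v8_num(1));
    }  // ~LocalContext() tears the entered context down
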
@@ -308,4 +348,26 @@ static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
}
+// Helper class for tracking and checking new heap allocations.
+// To enable checking of JS allocation tracking in a test, just
+// create an instance of this class.
+class HeapObjectsTracker {
+ public:
+ HeapObjectsTracker() {
+ heap_profiler_ = i::Isolate::Current()->heap_profiler();
+ CHECK_NE(NULL, heap_profiler_);
+ heap_profiler_->StartHeapAllocationsRecording();
+ }
+
+ ~HeapObjectsTracker() {
+ i::Isolate::Current()->heap()->CollectAllAvailableGarbage();
+ CHECK_EQ(0, heap_profiler_->FindUntrackedObjects());
+ heap_profiler_->StopHeapAllocationsRecording();
+ }
+
+ private:
+ i::HeapProfiler* heap_profiler_;
+};
+
+
#endif // ifndef CCTEST_H_
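
HeapObjectsTracker is lifetime-scoped: recording starts in the constructor, and the destructor collects garbage and then CHECKs that FindUntrackedObjects() returns zero. A sketch, assuming the usual CompileRun helper from cctest:

    TEST(JSAllocationsAreTracked) {
      CcTest::InitializeVM();
      v8::HandleScope scope(CcTest::isolate());
      HeapObjectsTracker tracker;  // starts allocation recording
      CompileRun("var a = []; for (var i = 0; i < 100; i++) a.push({});");
    }  // ~HeapObjectsTracker() collects garbage and verifies tracking
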
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 85661098b2..59bf8268e9 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -25,107 +25,112 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-prefix cctest
+[
+[ALWAYS, {
+ # All tests prefixed with 'Bug' are expected to fail.
+ 'test-api/Bug*': [FAIL],
-# All tests prefixed with 'Bug' are expected to fail.
-test-api/Bug*: FAIL
+ ##############################################################################
+ # BUG(382): Weird test. Can't guarantee that it never times out.
+ 'test-api/ApplyInterruption': [PASS, TIMEOUT],
-##############################################################################
-# BUG(382): Weird test. Can't guarantee that it never times out.
-test-api/ApplyInterruption: PASS || TIMEOUT
-
-# TODO(mstarzinger): Fail gracefully on multiple V8::Dispose calls.
-test-api/InitializeAndDisposeOnce: SKIP
-test-api/InitializeAndDisposeMultiple: SKIP
+ # TODO(mstarzinger): Fail gracefully on multiple V8::Dispose calls.
+ 'test-api/InitializeAndDisposeOnce': [SKIP],
+ 'test-api/InitializeAndDisposeMultiple': [SKIP],
-# These tests always fail. They are here to test test.py. If
-# they don't fail then test.py has failed.
-test-serialize/TestThatAlwaysFails: FAIL
-test-serialize/DependentTestThatAlwaysFails: FAIL
+ # These tests always fail. They are here to test test.py. If
+ # they don't fail then test.py has failed.
+ 'test-serialize/TestThatAlwaysFails': [FAIL],
+ 'test-serialize/DependentTestThatAlwaysFails': [FAIL],
-# This test always fails. It tests that LiveEdit causes abort when turned off.
-test-debug/LiveEditDisabled: FAIL
+ # This test always fails. It tests that LiveEdit causes an abort when turned off.
+ 'test-debug/LiveEditDisabled': [FAIL],
-# TODO(gc): Temporarily disabled in the GC branch.
-test-log/EquivalenceOfLoggingAndTraversal: PASS || FAIL
+ # TODO(gc): Temporarily disabled in the GC branch.
+ 'test-log/EquivalenceOfLoggingAndTraversal': [PASS, FAIL],
-# We do not yet shrink weak maps after they have been emptied by the GC
-test-weakmaps/Shrinking: FAIL
-test-weaksets/WeakSet_Shrinking: FAIL
+ # We do not yet shrink weak maps after they have been emptied by the GC
+ 'test-weakmaps/Shrinking': [FAIL],
+ 'test-weaksets/WeakSet_Shrinking': [FAIL],
-# Boot up memory use is bloated in debug mode.
-test-mark-compact/BootUpMemoryUse: PASS, PASS || FAIL if $mode == debug
+ # Boot up memory use is bloated in debug mode.
+ 'test-mark-compact/BootUpMemoryUse': [PASS, PASS, ['mode == debug', FAIL]],
-# Some CPU profiler tests are flaky.
-test-cpu-profiler/*: PASS || FLAKY
+ # This tests only that the preparser and parser agree, so there is no point in
+ # running several variants. Note that this still takes ages, because there
+ # are actually 13 * 38 * 5 * 128 = 316160 individual tests hidden here.
+ 'test-parsing/ParserSync': [PASS, NO_VARIANTS],
+}], # ALWAYS
##############################################################################
-[ $arch == arm ]
-
-# We cannot assume that we can throw OutOfMemory exceptions in all situations.
-# Apparently our ARM box is in such a state. Skip the test as it also runs for
-# a long time.
-test-api/OutOfMemory: SKIP
-test-api/OutOfMemoryNested: SKIP
-
-# BUG(355): Test crashes on ARM.
-test-log/ProfLazyMode: SKIP
-
-# BUG(1075): Unresolved crashes.
-test-serialize/Deserialize: SKIP
-test-serialize/DeserializeFromSecondSerializationAndRunScript2: SKIP
-test-serialize/DeserializeAndRunScript2: SKIP
-test-serialize/DeserializeFromSecondSerialization: SKIP
-
-# BUG(2874): Threading problems.
-test-api/*: PASS || FLAKY
+['arch == arm', {
+
+ # We cannot assume that we can throw OutOfMemory exceptions in all situations.
+ # Apparently our ARM box is in such a state. Skip the test as it also runs for
+ # a long time.
+ 'test-api/OutOfMemory': [SKIP],
+ 'test-api/OutOfMemoryNested': [SKIP],
+
+ # BUG(355): Test crashes on ARM.
+ 'test-log/ProfLazyMode': [SKIP],
+
+ # BUG(1075): Unresolved crashes.
+ 'test-serialize/Deserialize': [SKIP],
+ 'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
+ 'test-serialize/DeserializeAndRunScript2': [SKIP],
+ 'test-serialize/DeserializeFromSecondSerialization': [SKIP],
+}], # 'arch == arm'
##############################################################################
-[ $arch == mipsel ]
+['arch == mipsel', {
-# BUG(2657): Test sometimes times out on MIPS simulator.
-test-thread-termination/TerminateMultipleV8ThreadsDefaultIsolate: PASS || TIMEOUT
+ # BUG(2657): Test sometimes times out on MIPS simulator.
+ 'test-thread-termination/TerminateMultipleV8ThreadsDefaultIsolate': [PASS, TIMEOUT],
-# BUG(1075): Unresolved crashes on MIPS also.
-test-serialize/Deserialize: SKIP
-test-serialize/DeserializeFromSecondSerializationAndRunScript2: SKIP
-test-serialize/DeserializeAndRunScript2: SKIP
-test-serialize/DeserializeFromSecondSerialization: SKIP
+ # BUG(1075): Unresolved crashes on MIPS also.
+ 'test-serialize/Deserialize': [SKIP],
+ 'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
+ 'test-serialize/DeserializeAndRunScript2': [SKIP],
+ 'test-serialize/DeserializeFromSecondSerialization': [SKIP],
+}], # 'arch == mipsel'
##############################################################################
-[ $arch == android_arm || $arch == android_ia32 ]
+['arch == android_arm or arch == android_ia32', {
-# Tests crash as there is no /tmp directory in Android.
-test-log/LogAccessorCallbacks: SKIP
-test-log/LogCallbacks: SKIP
-test-log/ProfLazyMode: SKIP
+ # Tests crash as there is no /tmp directory in Android.
+ 'test-log/LogAccessorCallbacks': [SKIP],
+ 'test-log/LogCallbacks': [SKIP],
+ 'test-log/ProfLazyMode': [SKIP],
-# platform-tls.h does not contain an ANDROID-related header.
-test-platform-tls/FastTLS: SKIP
+ # platform-tls.h does not contain an ANDROID-related header.
+ 'test-platform-tls/FastTLS': [SKIP],
-# This test times out.
-test-threads/ThreadJoinSelf: SKIP
+ # This test times out.
+ 'test-threads/ThreadJoinSelf': [SKIP],
+}], # 'arch == android_arm or arch == android_ia32'
##############################################################################
-[ $arch == nacl_ia32 || $arch == nacl_x64 ]
-
-# NaCl builds have problems with threaded tests since Pepper_28.
-# V8 Issue 2786
-test-api/Threading1: SKIP
-test-lockers/MultithreadedParallelIsolates: SKIP
-test-lockers/ExtensionsRegistration: SKIP
-
-# These tests fail as there is no /tmp directory in Native Client.
-test-log/LogAccessorCallbacks: SKIP
-test-log/LogCallbacks: SKIP
-test-log/ProfLazyMode: SKIP
-
-# Native Client doesn't support sockets.
-test-debug/DebuggerAgent: SKIP
-test-debug/DebuggerAgentProtocolOverflowHeader: SKIP
-test-socket/Socket: SKIP
-
-# Profiling doesn't work on Native Client.
-test-cpu-profiler/*: SKIP
-
-# Fails since 16322 (new test).
-test-code-stubs-arm/ConvertDToI: SKIP
+['arch == nacl_ia32 or arch == nacl_x64', {
+
+ # NaCl builds have problems with threaded tests since Pepper_28.
+ # V8 Issue 2786
+ 'test-api/Threading1': [SKIP],
+ 'test-lockers/MultithreadedParallelIsolates': [SKIP],
+ 'test-lockers/ExtensionsRegistration': [SKIP],
+
+ # These tests fail as there is no /tmp directory in Native Client.
+ 'test-log/LogAccessorCallbacks': [SKIP],
+ 'test-log/LogCallbacks': [SKIP],
+ 'test-log/ProfLazyMode': [SKIP],
+
+ # Native Client doesn't support sockets.
+ 'test-debug/DebuggerAgent': [SKIP],
+ 'test-debug/DebuggerAgentProtocolOverflowHeader': [SKIP],
+ 'test-socket/Socket': [SKIP],
+
+ # Profiling doesn't work on Native Client.
+ 'test-cpu-profiler/*': [SKIP],
+
+ # Fails since 16322 (new test).
+ 'test-code-stubs-arm/ConvertDToI': [SKIP],
+}], # 'arch == nacl_ia32 or arch == nacl_x64'
+]
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 2aaac922bf..df4937ee28 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -120,7 +120,7 @@ THREADED_TEST(GlobalVariableAccess) {
foo = 0;
bar = -4;
baz = 10;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
templ->InstanceTemplate()->SetAccessor(v8_str("foo"),
GetIntValue,
@@ -148,7 +148,7 @@ static v8::Handle<v8::Object> x_holder;
template<class Info>
static void XGetter(const Info& info, int offset) {
ApiTestFuzzer::Fuzz();
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, info.GetIsolate());
CHECK_EQ(x_receiver, info.This());
info.GetReturnValue().Set(v8_num(x_register[offset]));
@@ -170,7 +170,7 @@ static void XGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
template<class Info>
static void XSetter(Local<Value> value, const Info& info, int offset) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, info.GetIsolate());
CHECK_EQ(x_holder, info.This());
CHECK_EQ(x_holder, info.Holder());
@@ -293,7 +293,7 @@ THREADED_TEST(HandleScopePop) {
obj->SetAccessor(v8_str("many"), HandleAllocatingGetter<1024>);
v8::Handle<v8::Object> inst = obj->NewInstance();
context->Global()->Set(v8::String::New("obj"), inst);
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
int count_before = i::HandleScope::NumberOfHandles(isolate);
{
v8::HandleScope scope(context->GetIsolate());
@@ -310,15 +310,15 @@ THREADED_TEST(HandleScopePop) {
static void CheckAccessorArgsCorrect(
Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- CHECK(info.GetIsolate() == v8::Isolate::GetCurrent());
+ CHECK(info.GetIsolate() == CcTest::isolate());
CHECK(info.This() == info.Holder());
CHECK(info.Data()->Equals(v8::String::New("data")));
ApiTestFuzzer::Fuzz();
- CHECK(info.GetIsolate() == v8::Isolate::GetCurrent());
+ CHECK(info.GetIsolate() == CcTest::isolate());
CHECK(info.This() == info.Holder());
CHECK(info.Data()->Equals(v8::String::New("data")));
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK(info.GetIsolate() == v8::Isolate::GetCurrent());
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CHECK(info.GetIsolate() == CcTest::isolate());
CHECK(info.This() == info.Holder());
CHECK(info.Data()->Equals(v8::String::New("data")));
info.GetReturnValue().Set(17);
@@ -354,7 +354,8 @@ static void EmptyGetter(Local<String> name,
THREADED_TEST(EmptyResult) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, v8::String::New("data"));
v8::Handle<v8::Object> inst = obj->NewInstance();
@@ -362,7 +363,7 @@ THREADED_TEST(EmptyResult) {
Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
for (int i = 0; i < 10; i++) {
Local<Value> result = scr->Run();
- CHECK(result == v8::Undefined());
+ CHECK(result == v8::Undefined(isolate));
}
}
@@ -370,7 +371,8 @@ THREADED_TEST(EmptyResult) {
THREADED_TEST(NoReuseRegress) {
// Check that the IC generated for the one test doesn't get reused
// for the other.
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
{
v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, v8::String::New("data"));
@@ -380,7 +382,7 @@ THREADED_TEST(NoReuseRegress) {
Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
for (int i = 0; i < 2; i++) {
Local<Value> result = scr->Run();
- CHECK(result == v8::Undefined());
+ CHECK(result == v8::Undefined(isolate));
}
}
{
@@ -405,14 +407,14 @@ static void ThrowingGetAccessor(
Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- v8::ThrowException(v8_str("g"));
+ info.GetIsolate()->ThrowException(v8_str("g"));
}
static void ThrowingSetAccessor(Local<String> name,
Local<Value> value,
const v8::PropertyCallbackInfo<void>& info) {
- v8::ThrowException(value);
+ info.GetIsolate()->ThrowException(value);
}
@@ -505,7 +507,7 @@ THREADED_TEST(StackIteration) {
static void AllocateHandles(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
for (int i = 0; i < i::kHandleBlockSize + 1; i++) {
- v8::Local<v8::Value>::New(name);
+ v8::Local<v8::Value>::New(info.GetIsolate(), name);
}
info.GetReturnValue().Set(v8::Integer::New(100));
}
@@ -554,7 +556,7 @@ THREADED_TEST(JSONStringifyNamedInterceptorObject) {
}
-THREADED_TEST(CrossContextAccess) {
+THREADED_TEST(AccessorPropertyCrossContext) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
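
A recurring substitution in these tests is the retirement of the static v8::ThrowException() in favour of the isolate-scoped form, with the isolate taken from the callback info rather than from Isolate::GetCurrent(). The pattern in isolation (the getter name and message are illustrative):

    static void ThrowingGetter(v8::Local<v8::String> name,
                               const v8::PropertyCallbackInfo<v8::Value>& info) {
      // Throw on the isolate that invoked this accessor.
      info.GetIsolate()->ThrowException(v8_str("error from getter"));
    }
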
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index f4f13d0d83..7a5979a951 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -37,7 +37,7 @@ using namespace v8::internal;
static MaybeObject* AllocateAfterFailures() {
static int attempts = 0;
if (++attempts < 3) return Failure::RetryAfterGC();
- Heap* heap = Isolate::Current()->heap();
+ Heap* heap = CcTest::heap();
// New space.
SimulateFullSpace(heap->new_space());
@@ -50,7 +50,7 @@ static MaybeObject* AllocateAfterFailures() {
CHECK(!heap->AllocateHeapNumber(0.42)->IsFailure());
CHECK(!heap->AllocateArgumentsObject(Smi::FromInt(87), 10)->IsFailure());
Object* object = heap->AllocateJSObject(
- *Isolate::Current()->object_function())->ToObjectChecked();
+ *CcTest::i_isolate()->object_function())->ToObjectChecked();
CHECK(!heap->CopyJSObject(JSObject::cast(object))->IsFailure());
// Old data space.
@@ -81,7 +81,7 @@ static MaybeObject* AllocateAfterFailures() {
// Test that we can allocate in old pointer space and code space.
SimulateFullSpace(heap->code_space());
CHECK(!heap->AllocateFixedArray(100, TENURED)->IsFailure());
- CHECK(!heap->CopyCode(Isolate::Current()->builtins()->builtin(
+ CHECK(!heap->CopyCode(CcTest::i_isolate()->builtins()->builtin(
Builtins::kIllegal))->IsFailure());
// Return success.
@@ -90,13 +90,13 @@ static MaybeObject* AllocateAfterFailures() {
static Handle<Object> Test() {
- CALL_HEAP_FUNCTION(Isolate::Current(), AllocateAfterFailures(), Object);
+ CALL_HEAP_FUNCTION(CcTest::i_isolate(), AllocateAfterFailures(), Object);
}
TEST(StressHandles) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Handle<v8::Context> env = v8::Context::New(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Handle<v8::Context> env = v8::Context::New(CcTest::isolate());
env->Enter();
Handle<Object> o = Test();
CHECK(o->IsSmi() && Smi::cast(*o)->value() == 42);
@@ -117,17 +117,17 @@ const AccessorDescriptor kDescriptor = {
TEST(StressJS) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Handle<v8::Context> env = v8::Context::New(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Handle<v8::Context> env = v8::Context::New(CcTest::isolate());
env->Enter();
Handle<JSFunction> function =
factory->NewFunction(factory->function_string(), factory->null_value());
// Force the creation of an initial map and set the code to
// something empty.
factory->NewJSObject(function);
- function->ReplaceCode(Isolate::Current()->builtins()->builtin(
+ function->ReplaceCode(CcTest::i_isolate()->builtins()->builtin(
Builtins::kEmptyFunction));
// Patch the map to have an accessor for "get".
Handle<Map> map(function->initial_map());
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index f4e40cdd38..d5e838ebe0 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -84,6 +84,7 @@ using ::v8::Value;
} \
THREADED_TEST(Name)
+
void RunWithProfiler(void (*test)()) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -185,7 +186,7 @@ TEST(InitializeAndDisposeMultiple) {
THREADED_TEST(Handles) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<Context> local_env;
{
LocalContext env;
@@ -196,7 +197,7 @@ THREADED_TEST(Handles) {
CHECK(!local_env.IsEmpty());
local_env->Enter();
- v8::Handle<v8::Primitive> undef = v8::Undefined();
+ v8::Handle<v8::Primitive> undef = v8::Undefined(CcTest::isolate());
CHECK(!undef.IsEmpty());
CHECK(undef->IsUndefined());
@@ -210,17 +211,17 @@ THREADED_TEST(Handles) {
THREADED_TEST(IsolateOfContext) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Handle<Context> env = Context::New(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Handle<Context> env = Context::New(CcTest::isolate());
CHECK(!env->InContext());
- CHECK(env->GetIsolate() == v8::Isolate::GetCurrent());
+ CHECK(env->GetIsolate() == CcTest::isolate());
env->Enter();
CHECK(env->InContext());
- CHECK(env->GetIsolate() == v8::Isolate::GetCurrent());
+ CHECK(env->GetIsolate() == CcTest::isolate());
env->Exit();
CHECK(!env->InContext());
- CHECK(env->GetIsolate() == v8::Isolate::GetCurrent());
+ CHECK(env->GetIsolate() == CcTest::isolate());
}
@@ -383,8 +384,9 @@ THREADED_TEST(ArgumentSignature) {
THREADED_TEST(HulIgennem) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::Primitive> undef = v8::Undefined();
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::Primitive> undef = v8::Undefined(isolate);
Local<String> undef_str = undef->ToString();
char* value = i::NewArray<char>(undef_str->Utf8Length() + 1);
undef_str->WriteUtf8(value);
@@ -395,7 +397,8 @@ THREADED_TEST(HulIgennem) {
THREADED_TEST(Access) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
Local<v8::Object> obj = v8::Object::New();
Local<Value> foo_before = obj->Get(v8_str("foo"));
CHECK(foo_before->IsUndefined());
@@ -515,11 +518,11 @@ THREADED_TEST(ScriptUsingStringResource) {
CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
source->GetExternalStringResourceBase(&encoding));
CHECK_EQ(String::TWO_BYTE_ENCODING, encoding);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(0, dispose_count);
}
- v8::internal::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllAvailableGarbage();
+ CcTest::i_isolate()->compilation_cache()->Clear();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(1, dispose_count);
}
@@ -544,11 +547,11 @@ THREADED_TEST(ScriptUsingAsciiStringResource) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(0, dispose_count);
}
- i::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllAvailableGarbage();
+ CcTest::i_isolate()->compilation_cache()->Clear();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(1, dispose_count);
}
@@ -561,8 +564,8 @@ THREADED_TEST(ScriptMakingExternalString) {
v8::HandleScope scope(env->GetIsolate());
Local<String> source = String::New(two_byte_source);
// Trigger GCs so that the newly allocated string moves to old gen.
- HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
- HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
CHECK_EQ(source->IsExternal(), false);
CHECK_EQ(source->IsExternalAscii(), false);
String::Encoding encoding = String::UNKNOWN_ENCODING;
@@ -575,11 +578,11 @@ THREADED_TEST(ScriptMakingExternalString) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(0, dispose_count);
}
- i::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::i_isolate()->compilation_cache()->Clear();
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(1, dispose_count);
}
@@ -592,8 +595,8 @@ THREADED_TEST(ScriptMakingExternalAsciiString) {
v8::HandleScope scope(env->GetIsolate());
Local<String> source = v8_str(c_source);
// Trigger GCs so that the newly allocated string moves to old gen.
- HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
- HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
bool success = source->MakeExternal(
new TestAsciiResource(i::StrDup(c_source), &dispose_count));
CHECK(success);
@@ -601,11 +604,11 @@ THREADED_TEST(ScriptMakingExternalAsciiString) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(0, dispose_count);
}
- i::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::i_isolate()->compilation_cache()->Clear();
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(1, dispose_count);
}
@@ -615,8 +618,8 @@ TEST(MakingExternalStringConditions) {
v8::HandleScope scope(env->GetIsolate());
// Free some space in the new space so that we can check freshness.
- HEAP->CollectGarbage(i::NEW_SPACE);
- HEAP->CollectGarbage(i::NEW_SPACE);
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
uint16_t* two_byte_string = AsciiToTwoByteString("s1");
Local<String> small_string = String::New(two_byte_string);
@@ -625,8 +628,8 @@ TEST(MakingExternalStringConditions) {
// We should refuse to externalize newly created small string.
CHECK(!small_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
- HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
- HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted.
CHECK(small_string->CanMakeExternal());
@@ -661,15 +664,15 @@ TEST(MakingExternalAsciiStringConditions) {
v8::HandleScope scope(env->GetIsolate());
// Free some space in the new space so that we can check freshness.
- HEAP->CollectGarbage(i::NEW_SPACE);
- HEAP->CollectGarbage(i::NEW_SPACE);
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
Local<String> small_string = String::New("s1");
// We should refuse to externalize newly created small string.
CHECK(!small_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
- HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
- HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted.
CHECK(small_string->CanMakeExternal());
@@ -707,9 +710,9 @@ TEST(MakingExternalUnalignedAsciiString) {
"slice('abcdefghijklmnopqrstuvwxyz');"));
// Trigger GCs so that the newly allocated string moves to old gen.
- SimulateFullSpace(HEAP->old_pointer_space());
- HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
- HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
+ SimulateFullSpace(CcTest::heap()->old_pointer_space());
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
// Turn into external string with unaligned resource data.
int dispose_count = 0;
@@ -723,48 +726,48 @@ TEST(MakingExternalUnalignedAsciiString) {
CHECK(success);
// Trigger GCs and force evacuation.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(i::Heap::kReduceMemoryFootprintMask);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kReduceMemoryFootprintMask);
}
THREADED_TEST(UsingExternalString) {
- i::Factory* factory = i::Isolate::Current()->factory();
+ i::Factory* factory = CcTest::i_isolate()->factory();
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
uint16_t* two_byte_string = AsciiToTwoByteString("test string");
Local<String> string =
String::NewExternal(new TestResource(two_byte_string));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen.
- HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
- HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
factory->InternalizedStringFromString(istring);
CHECK(isymbol->IsInternalizedString());
}
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
THREADED_TEST(UsingExternalAsciiString) {
- i::Factory* factory = i::Isolate::Current()->factory();
+ i::Factory* factory = CcTest::i_isolate()->factory();
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
const char* one_byte_string = "test string";
Local<String> string = String::NewExternal(
new TestAsciiResource(i::StrDup(one_byte_string)));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen.
- HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
- HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
factory->InternalizedStringFromString(istring);
CHECK(isymbol->IsInternalizedString());
}
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
@@ -774,18 +777,19 @@ THREADED_TEST(ScavengeExternalString) {
int dispose_count = 0;
bool in_new_space = false;
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
uint16_t* two_byte_string = AsciiToTwoByteString("test string");
Local<String> string =
String::NewExternal(new TestResource(two_byte_string,
&dispose_count));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
- HEAP->CollectGarbage(i::NEW_SPACE);
- in_new_space = HEAP->InNewSpace(*istring);
- CHECK(in_new_space || HEAP->old_data_space()->Contains(*istring));
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ in_new_space = CcTest::heap()->InNewSpace(*istring);
+ CHECK(in_new_space || CcTest::heap()->old_data_space()->Contains(*istring));
CHECK_EQ(0, dispose_count);
}
- HEAP->CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
+ CcTest::heap()->CollectGarbage(
+ in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
CHECK_EQ(1, dispose_count);
}
@@ -796,17 +800,18 @@ THREADED_TEST(ScavengeExternalAsciiString) {
int dispose_count = 0;
bool in_new_space = false;
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
const char* one_byte_string = "test string";
Local<String> string = String::NewExternal(
new TestAsciiResource(i::StrDup(one_byte_string), &dispose_count));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
- HEAP->CollectGarbage(i::NEW_SPACE);
- in_new_space = HEAP->InNewSpace(*istring);
- CHECK(in_new_space || HEAP->old_data_space()->Contains(*istring));
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+ in_new_space = CcTest::heap()->InNewSpace(*istring);
+ CHECK(in_new_space || CcTest::heap()->old_data_space()->Contains(*istring));
CHECK_EQ(0, dispose_count);
}
- HEAP->CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
+ CcTest::heap()->CollectGarbage(
+ in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
CHECK_EQ(1, dispose_count);
}
@@ -849,11 +854,11 @@ TEST(ExternalStringWithDisposeHandling) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
}
- i::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllAvailableGarbage();
+ CcTest::i_isolate()->compilation_cache()->Clear();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
@@ -870,11 +875,11 @@ TEST(ExternalStringWithDisposeHandling) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
}
- i::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllAvailableGarbage();
+ CcTest::i_isolate()->compilation_cache()->Clear();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_count);
}
@@ -920,9 +925,9 @@ THREADED_TEST(StringConcat) {
CHECK(value->IsNumber());
CHECK_EQ(68, value->Int32Value());
}
- i::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::i_isolate()->compilation_cache()->Clear();
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
@@ -940,7 +945,7 @@ template<typename T>
static void CheckReturnValue(const T& t, i::Address callback) {
v8::ReturnValue<v8::Value> rv = t.GetReturnValue();
i::Object** o = *reinterpret_cast<i::Object***>(&rv);
- CHECK_EQ(v8::Isolate::GetCurrent(), t.GetIsolate());
+ CHECK_EQ(CcTest::isolate(), t.GetIsolate());
CHECK_EQ(t.GetIsolate(), rv.GetIsolate());
CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
// Verify reset
@@ -1192,7 +1197,7 @@ Handle<Value> TestFastReturnValues() {
THREADED_PROFILED_TEST(FastReturnValues) {
LocalContext env;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Value> value;
// check int32_t and uint32_t
int32_t int_values[] = {
@@ -1412,7 +1417,7 @@ THREADED_TEST(FindInstanceInPrototypeChain) {
THREADED_TEST(TinyInteger) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
int32_t value = 239;
Local<v8::Integer> value_obj = v8::Integer::New(value);
@@ -1426,7 +1431,7 @@ THREADED_TEST(TinyInteger) {
THREADED_TEST(BigSmiInteger) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
int32_t value = i::Smi::kMaxValue;
// We cannot add one to a Smi::kMaxValue without wrapping.
@@ -1446,7 +1451,7 @@ THREADED_TEST(BigSmiInteger) {
THREADED_TEST(BigInteger) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
// We cannot add one to a Smi::kMaxValue without wrapping.
if (i::SmiValuesAre31Bits()) {
@@ -1469,7 +1474,7 @@ THREADED_TEST(BigInteger) {
THREADED_TEST(TinyUnsignedInteger) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
uint32_t value = 239;
@@ -1484,7 +1489,7 @@ THREADED_TEST(TinyUnsignedInteger) {
THREADED_TEST(BigUnsignedSmiInteger) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
uint32_t value = static_cast<uint32_t>(i::Smi::kMaxValue);
CHECK(i::Smi::IsValid(value));
@@ -1501,7 +1506,7 @@ THREADED_TEST(BigUnsignedSmiInteger) {
THREADED_TEST(BigUnsignedInteger) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
uint32_t value = static_cast<uint32_t>(i::Smi::kMaxValue) + 1;
CHECK(value > static_cast<uint32_t>(i::Smi::kMaxValue));
@@ -1518,7 +1523,7 @@ THREADED_TEST(BigUnsignedInteger) {
THREADED_TEST(OutOfSignedRangeUnsignedInteger) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
uint32_t INT32_MAX_AS_UINT = (1U << 31) - 1;
uint32_t value = INT32_MAX_AS_UINT + 1;
@@ -1679,12 +1684,13 @@ THREADED_TEST(Number) {
THREADED_TEST(ToNumber) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
Local<String> str = v8_str("3.1415926");
CHECK_EQ(3.1415926, str->NumberValue());
- v8::Handle<v8::Boolean> t = v8::True();
+ v8::Handle<v8::Boolean> t = v8::True(isolate);
CHECK_EQ(1.0, t->NumberValue());
- v8::Handle<v8::Boolean> f = v8::False();
+ v8::Handle<v8::Boolean> f = v8::False(isolate);
CHECK_EQ(0.0, f->NumberValue());
}
@@ -1703,13 +1709,13 @@ THREADED_TEST(Date) {
THREADED_TEST(Boolean) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::Boolean> t = v8::True();
+ v8::Handle<v8::Boolean> t = v8::True(CcTest::isolate());
CHECK(t->Value());
- v8::Handle<v8::Boolean> f = v8::False();
+ v8::Handle<v8::Boolean> f = v8::False(CcTest::isolate());
CHECK(!f->Value());
- v8::Handle<v8::Primitive> u = v8::Undefined();
+ v8::Handle<v8::Primitive> u = v8::Undefined(CcTest::isolate());
CHECK(!u->BooleanValue());
- v8::Handle<v8::Primitive> n = v8::Null();
+ v8::Handle<v8::Primitive> n = v8::Null(CcTest::isolate());
CHECK(!n->BooleanValue());
v8::Handle<String> str1 = v8_str("");
CHECK(!str1->BooleanValue());
@@ -1737,7 +1743,7 @@ static void GetM(Local<String> name,
THREADED_TEST(GlobalPrototype) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> func_templ = v8::FunctionTemplate::New();
func_templ->PrototypeTemplate()->Set(
"dummy",
@@ -1755,7 +1761,7 @@ THREADED_TEST(GlobalPrototype) {
THREADED_TEST(ObjectTemplate) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ1 = ObjectTemplate::New();
templ1->Set("x", v8_num(10));
templ1->Set("y", v8_num(13));
@@ -1792,7 +1798,7 @@ static void GetKnurd(Local<String> property,
THREADED_TEST(DescriptorInheritance) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> super = v8::FunctionTemplate::New();
super->PrototypeTemplate()->Set("flabby",
v8::FunctionTemplate::New(GetFlabby));
@@ -1933,7 +1939,7 @@ void AddInterceptor(Handle<FunctionTemplate> templ,
THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
Handle<FunctionTemplate> child = FunctionTemplate::New();
child->Inherit(parent);
@@ -1951,7 +1957,7 @@ THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
Handle<FunctionTemplate> child = FunctionTemplate::New();
child->Inherit(parent);
@@ -1972,7 +1978,7 @@ THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
Handle<FunctionTemplate> child = FunctionTemplate::New();
child->Inherit(parent);
@@ -1992,7 +1998,7 @@ THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
THREADED_TEST(SwitchFromInterceptorToAccessor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
AddAccessor(templ, v8_str("age"),
SimpleAccessorGetter, SimpleAccessorSetter);
@@ -2010,7 +2016,7 @@ THREADED_TEST(SwitchFromInterceptorToAccessor) {
THREADED_TEST(SwitchFromAccessorToInterceptor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
AddAccessor(templ, v8_str("age"),
SimpleAccessorGetter, SimpleAccessorSetter);
@@ -2028,7 +2034,7 @@ THREADED_TEST(SwitchFromAccessorToInterceptor) {
THREADED_TEST(SwitchFromInterceptorToAccessorWithInheritance) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
Handle<FunctionTemplate> child = FunctionTemplate::New();
child->Inherit(parent);
@@ -2048,7 +2054,7 @@ THREADED_TEST(SwitchFromInterceptorToAccessorWithInheritance) {
THREADED_TEST(SwitchFromAccessorToInterceptorWithInheritance) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
Handle<FunctionTemplate> child = FunctionTemplate::New();
child->Inherit(parent);
@@ -2068,7 +2074,7 @@ THREADED_TEST(SwitchFromAccessorToInterceptorWithInheritance) {
THREADED_TEST(SwitchFromInterceptorToJSAccessor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
@@ -2093,7 +2099,7 @@ THREADED_TEST(SwitchFromInterceptorToJSAccessor) {
THREADED_TEST(SwitchFromJSAccessorToInterceptor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
@@ -2118,7 +2124,7 @@ THREADED_TEST(SwitchFromJSAccessorToInterceptor) {
THREADED_TEST(SwitchFromInterceptorToProperty) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
Handle<FunctionTemplate> child = FunctionTemplate::New();
child->Inherit(parent);
@@ -2136,7 +2142,7 @@ THREADED_TEST(SwitchFromInterceptorToProperty) {
THREADED_TEST(SwitchFromPropertyToInterceptor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> parent = FunctionTemplate::New();
Handle<FunctionTemplate> child = FunctionTemplate::New();
child->Inherit(parent);
@@ -2155,7 +2161,7 @@ THREADED_TEST(SwitchFromPropertyToInterceptor) {
THREADED_TEST(NamedPropertyHandlerGetter) {
echo_named_call_count = 0;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
templ->InstanceTemplate()->SetNamedPropertyHandler(EchoNamedProperty,
0, 0, 0, 0,
@@ -2191,7 +2197,7 @@ static void EchoIndexedProperty(
THREADED_TEST(IndexedPropertyHandlerGetter) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
templ->InstanceTemplate()->SetIndexedPropertyHandler(EchoIndexedProperty,
0, 0, 0, 0,
@@ -2362,7 +2368,7 @@ static void PrePropertyHandlerQuery(
THREADED_TEST(PrePropertyHandler) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> desc = v8::FunctionTemplate::New();
desc->InstanceTemplate()->SetNamedPropertyHandler(PrePropertyHandlerGet,
0,
@@ -2419,7 +2425,7 @@ static void CallFunctionRecursivelyCall(
THREADED_TEST(DeepCrossLanguageRecursion) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> global = ObjectTemplate::New();
global->Set(v8_str("callScriptRecursively"),
v8::FunctionTemplate::New(CallScriptRecursivelyCall));
@@ -2441,7 +2447,7 @@ static void ThrowingPropertyHandlerGet(
Local<String> key,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- info.GetReturnValue().Set(v8::ThrowException(key));
+ info.GetReturnValue().Set(info.GetIsolate()->ThrowException(key));
}
@@ -2449,13 +2455,13 @@ static void ThrowingPropertyHandlerSet(
Local<String> key,
Local<Value>,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- v8::ThrowException(key);
+ info.GetIsolate()->ThrowException(key);
info.GetReturnValue().SetUndefined(); // not the same as empty handle
}
THREADED_TEST(CallbackExceptionRegression) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
obj->SetNamedPropertyHandler(ThrowingPropertyHandlerGet,
ThrowingPropertyHandlerSet);
@@ -2471,7 +2477,7 @@ THREADED_TEST(CallbackExceptionRegression) {
THREADED_TEST(FunctionPrototype) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<v8::FunctionTemplate> Foo = v8::FunctionTemplate::New();
Foo->PrototypeTemplate()->Set(v8_str("plak"), v8_num(321));
LocalContext env;
@@ -2497,7 +2503,7 @@ THREADED_TEST(InternalFields) {
THREADED_TEST(GlobalObjectInternalFields) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->SetInternalFieldCount(1);
LocalContext env(NULL, global_template);
@@ -2512,7 +2518,7 @@ THREADED_TEST(GlobalObjectInternalFields) {
THREADED_TEST(GlobalObjectHasRealIndexedProperty) {
LocalContext env;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Object> global = env->Global();
global->Set(0, v8::String::New("value"));
@@ -2524,7 +2530,7 @@ static void CheckAlignedPointerInInternalField(Handle<v8::Object> obj,
void* value) {
CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
obj->SetAlignedPointerInInternalField(0, value);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(value, obj->GetAlignedPointerFromInternalField(0));
}
@@ -2558,7 +2564,7 @@ static void CheckAlignedPointerInEmbedderData(LocalContext* env,
void* value) {
CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
(*env)->SetAlignedPointerInEmbedderData(index, value);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(value, (*env)->GetAlignedPointerFromEmbedderData(index));
}
@@ -2588,7 +2594,7 @@ THREADED_TEST(EmbedderDataAlignedPointers) {
for (int i = 0; i < 100; i++) {
env->SetAlignedPointerInEmbedderData(i, AlignedTestPointer(i));
}
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
for (int i = 0; i < 100; i++) {
CHECK_EQ(AlignedTestPointer(i), env->GetAlignedPointerFromEmbedderData(i));
}
@@ -2620,7 +2626,7 @@ THREADED_TEST(IdentityHash) {
// Ensure that the test starts with an fresh heap to test whether the hash
// code is based on the address.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
Local<v8::Object> obj = v8::Object::New();
int hash = obj->GetIdentityHash();
int hash1 = obj->GetIdentityHash();
@@ -2630,7 +2636,7 @@ THREADED_TEST(IdentityHash) {
// objects should not be assigned the same hash code. If the test below fails
// the random number generator should be evaluated.
CHECK_NE(hash, hash2);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
int hash3 = v8::Object::New()->GetIdentityHash();
// Make sure that the identity hash is not based on the initial address of
// the object alone. If the test below fails the random number generator
@@ -2669,7 +2675,7 @@ THREADED_TEST(SymbolProperties) {
v8::Local<v8::Symbol> sym1 = v8::Symbol::New(isolate);
v8::Local<v8::Symbol> sym2 = v8::Symbol::New(isolate, "my-symbol");
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
// Check basic symbol functionality.
CHECK(sym1->IsSymbol());
@@ -2720,7 +2726,7 @@ THREADED_TEST(SymbolProperties) {
CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
CHECK_EQ(num_props + 1, obj->GetPropertyNames()->Length());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
// Add another property and delete it afterwards to force the object in
// slow case.
@@ -2770,7 +2776,7 @@ THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
CheckInternalFieldsAreZero(ab);
CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
CHECK(!ab->IsExternal());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
ScopedArrayBufferContents ab_contents(ab->Externalize());
CHECK(ab->IsExternal());
@@ -3012,7 +3018,7 @@ THREADED_TEST(HiddenProperties) {
v8::Local<v8::String> empty = v8_str("");
v8::Local<v8::String> prop_name = v8_str("prop_name");
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
// Make sure delete of a non-existent hidden value works
CHECK(obj->DeleteHiddenValue(key));
@@ -3022,7 +3028,7 @@ THREADED_TEST(HiddenProperties) {
CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
// Make sure we do not find the hidden property.
CHECK(!obj->Has(empty));
@@ -3033,7 +3039,7 @@ THREADED_TEST(HiddenProperties) {
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
CHECK_EQ(2003, obj->Get(empty)->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
// Add another property and delete it afterwards to force the object in
// slow case.
@@ -3044,7 +3050,7 @@ THREADED_TEST(HiddenProperties) {
CHECK(obj->Delete(prop_name));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK(obj->SetHiddenValue(key, Handle<Value>()));
CHECK(obj->GetHiddenValue(key).IsEmpty());
@@ -3111,7 +3117,7 @@ THREADED_TEST(HiddenPropertiesWithInterceptors) {
THREADED_TEST(External) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
int x = 3;
Local<v8::External> ext = v8::External::New(&x);
LocalContext env;
@@ -3143,7 +3149,7 @@ THREADED_TEST(External) {
THREADED_TEST(GlobalHandle) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::Persistent<String> global;
{
v8::HandleScope scope(isolate);
@@ -3168,7 +3174,7 @@ THREADED_TEST(GlobalHandle) {
THREADED_TEST(ResettingGlobalHandle) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::Persistent<String> global;
{
v8::HandleScope scope(isolate);
@@ -3196,7 +3202,7 @@ THREADED_TEST(ResettingGlobalHandle) {
THREADED_TEST(ResettingGlobalHandleToEmpty) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::Persistent<String> global;
{
v8::HandleScope scope(isolate);
@@ -3220,7 +3226,7 @@ THREADED_TEST(ResettingGlobalHandleToEmpty) {
THREADED_TEST(ClearAndLeakGlobal) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::internal::GlobalHandles* global_handles = NULL;
int initial_handle_count = 0;
v8::Persistent<String> global;
@@ -3242,9 +3248,9 @@ THREADED_TEST(ClearAndLeakGlobal) {
THREADED_TEST(GlobalHandleUpcast) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Local<String> local = v8::Local<String>::New(v8_str("str"));
+ v8::Local<String> local = v8::Local<String>::New(isolate, v8_str("str"));
v8::Persistent<String> global_string(isolate, local);
v8::Persistent<Value>& global_value =
v8::Persistent<Value>::Cast(global_string);
@@ -3255,7 +3261,7 @@ THREADED_TEST(GlobalHandleUpcast) {
THREADED_TEST(HandleEquality) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::Persistent<String> global1;
v8::Persistent<String> global2;
{
@@ -3293,11 +3299,9 @@ THREADED_TEST(HandleEquality) {
THREADED_TEST(LocalHandle) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Local<String> local = v8::Local<String>::New(v8_str("str"));
- CHECK_EQ(local->Length(), 3);
-
- local = v8::Local<String>::New(v8::Isolate::GetCurrent(), v8_str("str"));
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<String> local =
+ v8::Local<String>::New(CcTest::isolate(), v8_str("str"));
CHECK_EQ(local->Length(), 3);
}
@@ -3314,8 +3318,9 @@ class WeakCallCounter {
};
+template<typename T>
static void WeakPointerCallback(v8::Isolate* isolate,
- Persistent<Value>* handle,
+ Persistent<T>* handle,
WeakCallCounter* counter) {
CHECK_EQ(1234, counter->id());
counter->increment();
@@ -3323,7 +3328,8 @@ static void WeakPointerCallback(v8::Isolate* isolate,
}
-static UniqueId MakeUniqueId(const Persistent<Value>& p) {
+template<typename T>
+static UniqueId MakeUniqueId(const Persistent<T>& p) {
return UniqueId(reinterpret_cast<uintptr_t>(*v8::Utils::OpenPersistent(p)));
}
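// Why WeakPointerCallback and MakeUniqueId become templates: under the
// non-copyable Persistent<T> of this API revision, a Persistent<String> no
// longer converts to Persistent<Value>&, so a callback declared against
// Persistent<Value> cannot be installed on String handles. Templating on T
// lets each MakeWeak call site deduce the handle type, e.g. (sketch):
//
//   v8::Persistent<v8::String> handle;
//   handle.MakeWeak(&counter, &WeakPointerCallback);  // deduces T = String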
@@ -3421,6 +3427,97 @@ THREADED_TEST(ApiObjectGroups) {
}
+THREADED_TEST(ApiObjectGroupsForSubtypes) {
+ LocalContext env;
+ v8::Isolate* iso = env->GetIsolate();
+ HandleScope scope(iso);
+
+ Persistent<Object> g1s1;
+ Persistent<String> g1s2;
+ Persistent<String> g1c1;
+ Persistent<Object> g2s1;
+ Persistent<String> g2s2;
+ Persistent<String> g2c1;
+
+ WeakCallCounter counter(1234);
+
+ {
+ HandleScope scope(iso);
+ g1s1.Reset(iso, Object::New());
+ g1s2.Reset(iso, String::New("foo1"));
+ g1c1.Reset(iso, String::New("foo2"));
+ g1s1.MakeWeak(&counter, &WeakPointerCallback);
+ g1s2.MakeWeak(&counter, &WeakPointerCallback);
+ g1c1.MakeWeak(&counter, &WeakPointerCallback);
+
+ g2s1.Reset(iso, Object::New());
+ g2s2.Reset(iso, String::New("foo3"));
+ g2c1.Reset(iso, String::New("foo4"));
+ g2s1.MakeWeak(&counter, &WeakPointerCallback);
+ g2s2.MakeWeak(&counter, &WeakPointerCallback);
+ g2c1.MakeWeak(&counter, &WeakPointerCallback);
+ }
+
+ Persistent<Value> root(iso, g1s1); // make a root.
+
+ // Connect groups 1 and 2, making a cycle.
+ {
+ HandleScope scope(iso);
+ CHECK(Local<Object>::New(iso, g1s1)->Set(0, Local<Object>::New(iso, g2s1)));
+ CHECK(Local<Object>::New(iso, g2s1)->Set(0, Local<Object>::New(iso, g1s1)));
+ }
+
+ {
+ UniqueId id1 = MakeUniqueId(g1s1);
+ UniqueId id2 = MakeUniqueId(g2s2);
+ iso->SetObjectGroupId(g1s1, id1);
+ iso->SetObjectGroupId(g1s2, id1);
+ iso->SetReference(g1s1, g1c1);
+ iso->SetObjectGroupId(g2s1, id2);
+ iso->SetObjectGroupId(g2s2, id2);
+ iso->SetReferenceFromGroup(id2, g2c1);
+ }
+ // Do a single full GC, ensure incremental marking is stopped.
+ v8::internal::Heap* heap = reinterpret_cast<v8::internal::Isolate*>(
+ iso)->heap();
+ heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+
+ // All objects should be alive.
+ CHECK_EQ(0, counter.NumberOfWeakCalls());
+
+ // Weaken the root.
+ root.MakeWeak(&counter, &WeakPointerCallback);
+ // But make the children strong roots; all the objects except the children
+ // should be collectable now.
+ g1c1.ClearWeak();
+ g2c1.ClearWeak();
+
+ // Object groups are cleared by each GC, so rebuild them.
+ {
+ UniqueId id1 = MakeUniqueId(g1s1);
+ UniqueId id2 = MakeUniqueId(g2s2);
+ iso->SetObjectGroupId(g1s1, id1);
+ iso->SetObjectGroupId(g1s2, id1);
+ iso->SetReference(g1s1, g1c1);
+ iso->SetObjectGroupId(g2s1, id2);
+ iso->SetObjectGroupId(g2s2, id2);
+ iso->SetReferenceFromGroup(id2, g2c1);
+ }
+
+ heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+
+ // All objects except the strong children should be gone, 5 weak calls in total.
+ CHECK_EQ(5, counter.NumberOfWeakCalls());
+
+ // And now make children weak again and collect them.
+ g1c1.MakeWeak(&counter, &WeakPointerCallback);
+ g2c1.MakeWeak(&counter, &WeakPointerCallback);
+
+ heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CHECK_EQ(7, counter.NumberOfWeakCalls());
+}
+
+
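// The expected weak-call counts in the test above work out as follows: it
// holds seven persistent handles in total (g1s1, g1s2, g1c1, g2s1, g2s2,
// g2c1, plus the weakened root). With the two children made strong via
// ClearWeak(), the first full GC fires 7 - 2 = 5 callbacks; re-weakening
// the children and collecting again adds the remaining two, 5 + 2 = 7.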
THREADED_TEST(ApiObjectGroupsCycle) {
LocalContext env;
v8::Isolate* iso = env->GetIsolate();
@@ -3675,7 +3772,7 @@ static void check_message_0(v8::Handle<v8::Message> message,
THREADED_TEST(MessageHandler0) {
message_received = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
CHECK(!message_received);
v8::V8::AddMessageListener(check_message_0, v8_num(5.76));
LocalContext context;
@@ -3702,7 +3799,7 @@ static void check_message_1(v8::Handle<v8::Message> message,
TEST(MessageHandler1) {
message_received = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
CHECK(!message_received);
v8::V8::AddMessageListener(check_message_1);
LocalContext context;
@@ -3727,7 +3824,7 @@ static void check_message_2(v8::Handle<v8::Message> message,
TEST(MessageHandler2) {
message_received = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
CHECK(!message_received);
v8::V8::AddMessageListener(check_message_2);
LocalContext context;
@@ -3752,15 +3849,16 @@ static void check_message_3(v8::Handle<v8::Message> message,
TEST(MessageHandler3) {
message_received = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
CHECK(!message_received);
v8::V8::AddMessageListener(check_message_3);
LocalContext context;
v8::ScriptOrigin origin =
v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(1),
- v8::Integer::New(2),
- v8::True());
+ v8::Integer::New(1, isolate),
+ v8::Integer::New(2, isolate),
+ v8::True(isolate));
v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
&origin);
script->Run();
@@ -3780,15 +3878,16 @@ static void check_message_4(v8::Handle<v8::Message> message,
TEST(MessageHandler4) {
message_received = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
CHECK(!message_received);
v8::V8::AddMessageListener(check_message_4);
LocalContext context;
v8::ScriptOrigin origin =
v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(1),
- v8::Integer::New(2),
- v8::False());
+ v8::Integer::New(1, isolate),
+ v8::Integer::New(2, isolate),
+ v8::False(isolate));
v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
&origin);
script->Run();
@@ -3816,15 +3915,16 @@ static void check_message_5b(v8::Handle<v8::Message> message,
TEST(MessageHandler5) {
message_received = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
CHECK(!message_received);
v8::V8::AddMessageListener(check_message_5a);
LocalContext context;
v8::ScriptOrigin origin =
v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(1),
- v8::Integer::New(2),
- v8::True());
+ v8::Integer::New(1, isolate),
+ v8::Integer::New(2, isolate),
+ v8::True(isolate));
v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
&origin);
script->Run();
@@ -3836,9 +3936,9 @@ TEST(MessageHandler5) {
v8::V8::AddMessageListener(check_message_5b);
origin =
v8::ScriptOrigin(v8_str("6.75"),
- v8::Integer::New(1),
- v8::Integer::New(2),
- v8::False());
+ v8::Integer::New(1, isolate),
+ v8::Integer::New(2, isolate),
+ v8::False(isolate));
script = Script::Compile(v8_str("throw 'error'"),
&origin);
script->Run();
@@ -3958,7 +4058,7 @@ void HandleF(const v8::FunctionCallbackInfo<v8::Value>& args) {
THREADED_TEST(Vector) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> global = ObjectTemplate::New();
global->Set(v8_str("f"), v8::FunctionTemplate::New(HandleF));
LocalContext context(0, global);
@@ -3997,7 +4097,8 @@ THREADED_TEST(Vector) {
THREADED_TEST(FunctionCall) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
CompileRun(
"function Foo() {"
" var result = [];"
@@ -4005,9 +4106,20 @@ THREADED_TEST(FunctionCall) {
" result.push(arguments[i]);"
" }"
" return result;"
+ "}"
+ "function ReturnThisSloppy() {"
+ " return this;"
+ "}"
+ "function ReturnThisStrict() {"
+ " 'use strict';"
+ " return this;"
"}");
Local<Function> Foo =
Local<Function>::Cast(context->Global()->Get(v8_str("Foo")));
+ Local<Function> ReturnThisSloppy =
+ Local<Function>::Cast(context->Global()->Get(v8_str("ReturnThisSloppy")));
+ Local<Function> ReturnThisStrict =
+ Local<Function>::Cast(context->Global()->Get(v8_str("ReturnThisStrict")));
v8::Handle<Value>* args0 = NULL;
Local<v8::Array> a0 = Local<v8::Array>::Cast(Foo->Call(Foo, 0, args0));
@@ -4044,6 +4156,31 @@ THREADED_TEST(FunctionCall) {
CHECK_EQ(8.8, a4->Get(v8::Integer::New(1))->NumberValue());
CHECK_EQ(9.9, a4->Get(v8::Integer::New(2))->NumberValue());
CHECK_EQ(10.11, a4->Get(v8::Integer::New(3))->NumberValue());
+
+ Local<v8::Value> r1 = ReturnThisSloppy->Call(v8::Undefined(isolate), 0, NULL);
+ CHECK(r1->StrictEquals(context->Global()));
+ Local<v8::Value> r2 = ReturnThisSloppy->Call(v8::Null(isolate), 0, NULL);
+ CHECK(r2->StrictEquals(context->Global()));
+ Local<v8::Value> r3 = ReturnThisSloppy->Call(v8_num(42), 0, NULL);
+ CHECK(r3->IsNumberObject());
+ CHECK_EQ(42.0, r3.As<v8::NumberObject>()->ValueOf());
+ Local<v8::Value> r4 = ReturnThisSloppy->Call(v8_str("hello"), 0, NULL);
+ CHECK(r4->IsStringObject());
+ CHECK(r4.As<v8::StringObject>()->ValueOf()->StrictEquals(v8_str("hello")));
+ Local<v8::Value> r5 = ReturnThisSloppy->Call(v8::True(isolate), 0, NULL);
+ CHECK(r5->IsBooleanObject());
+ CHECK(r5.As<v8::BooleanObject>()->ValueOf());
+
+ Local<v8::Value> r6 = ReturnThisStrict->Call(v8::Undefined(isolate), 0, NULL);
+ CHECK(r6->IsUndefined());
+ Local<v8::Value> r7 = ReturnThisStrict->Call(v8::Null(isolate), 0, NULL);
+ CHECK(r7->IsNull());
+ Local<v8::Value> r8 = ReturnThisStrict->Call(v8_num(42), 0, NULL);
+ CHECK(r8->StrictEquals(v8_num(42)));
+ Local<v8::Value> r9 = ReturnThisStrict->Call(v8_str("hello"), 0, NULL);
+ CHECK(r9->StrictEquals(v8_str("hello")));
+ Local<v8::Value> r10 = ReturnThisStrict->Call(v8::True(isolate), 0, NULL);
+ CHECK(r10->StrictEquals(v8::True(isolate)));
}
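// The receiver handling exercised by ReturnThisSloppy/ReturnThisStrict
// follows ES5 call semantics: a sloppy-mode callee replaces an undefined
// or null receiver with the global object and boxes primitive receivers
// (42 arrives as a Number wrapper object), whereas a strict-mode callee
// receives `this` exactly as passed. Hence r1/r2 compare equal to the
// global object while r6 through r10 see the raw values.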
@@ -4104,7 +4241,7 @@ TEST(OutOfMemoryNested) {
constraints.set_max_old_space_size(5 * K * K);
v8::SetResourceConstraints(&constraints);
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("ProvokeOutOfMemory"),
v8::FunctionTemplate::New(ProvokeOutOfMemory));
@@ -4311,7 +4448,8 @@ THREADED_TEST(isNumberType) {
THREADED_TEST(ConversionException) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
CompileRun(
"function TestClass() { };"
"TestClass.prototype.toString = function () { throw 'uncle?'; };"
@@ -4340,7 +4478,7 @@ THREADED_TEST(ConversionException) {
CHECK(to_int32_result.IsEmpty());
CheckUncle(&try_catch);
- Local<Value> to_object_result = v8::Undefined()->ToObject();
+ Local<Value> to_object_result = v8::Undefined(isolate)->ToObject();
CHECK(to_object_result.IsEmpty());
CHECK(try_catch.HasCaught());
try_catch.Reset();
@@ -4365,7 +4503,7 @@ THREADED_TEST(ConversionException) {
void ThrowFromC(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
- v8::ThrowException(v8_str("konto"));
+ args.GetIsolate()->ThrowException(v8_str("konto"));
}
@@ -4383,7 +4521,7 @@ void CCatcher(const v8::FunctionCallbackInfo<v8::Value>& args) {
THREADED_TEST(APICatch) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("ThrowFromC"),
v8::FunctionTemplate::New(ThrowFromC));
@@ -4401,7 +4539,7 @@ THREADED_TEST(APICatch) {
THREADED_TEST(APIThrowTryCatch) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("ThrowFromC"),
v8::FunctionTemplate::New(ThrowFromC));
@@ -4420,7 +4558,7 @@ THREADED_TEST(APIThrowTryCatch) {
// JS stack. This test therefore fails on the simulator. The test is
// not threaded to allow the threading tests to run on the simulator.
TEST(TryCatchInTryFinally) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("CCatcher"),
v8::FunctionTemplate::New(CCatcher));
@@ -4454,7 +4592,7 @@ static void Fail(const v8::FunctionCallbackInfo<v8::Value>& args) {
// formatting. However, they are invoked when performing normal error
// string conversions.
TEST(APIThrowMessageOverwrittenToString) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::V8::AddMessageListener(check_reference_error_message);
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("fail"), v8::FunctionTemplate::New(Fail));
@@ -4578,7 +4716,7 @@ static void receive_message(v8::Handle<v8::Message> message,
TEST(APIThrowMessage) {
message_received = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::V8::AddMessageListener(receive_message);
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("ThrowFromC"),
@@ -4592,7 +4730,7 @@ TEST(APIThrowMessage) {
TEST(APIThrowMessageAndVerboseTryCatch) {
message_received = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::V8::AddMessageListener(receive_message);
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("ThrowFromC"),
@@ -4624,7 +4762,7 @@ TEST(APIStackOverflowAndVerboseTryCatch) {
THREADED_TEST(ExternalScriptException) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("ThrowFromC"),
v8::FunctionTemplate::New(ThrowFromC));
@@ -4648,10 +4786,11 @@ void CThrowCountDown(const v8::FunctionCallbackInfo<v8::Value>& args) {
int count = args[0]->Int32Value();
int cInterval = args[2]->Int32Value();
if (count == 0) {
- v8::ThrowException(v8_str("FromC"));
+ args.GetIsolate()->ThrowException(v8_str("FromC"));
return;
} else {
- Local<v8::Object> global = Context::GetCurrent()->Global();
+ Local<v8::Object> global =
+ args.GetIsolate()->GetCurrentContext()->Global();
Local<Value> fun = global->Get(v8_str("JSThrowCountDown"));
v8::Handle<Value> argv[] = { v8_num(count - 1),
args[1],
@@ -4664,7 +4803,7 @@ void CThrowCountDown(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (try_catch.HasCaught()) {
CHECK_EQ(expected, count);
CHECK(result.IsEmpty());
- CHECK(!i::Isolate::Current()->has_scheduled_exception());
+ CHECK(!CcTest::i_isolate()->has_scheduled_exception());
} else {
CHECK_NE(expected, count);
}
@@ -4728,7 +4867,7 @@ THREADED_TEST(EvalInTryFinally) {
// JS stack. This test therefore fails on the simulator. The test is
// not threaded to allow the threading tests to run on the simulator.
TEST(ExceptionOrder) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("check"), v8::FunctionTemplate::New(JSCheck));
templ->Set(v8_str("CThrowCountDown"),
@@ -4787,12 +4926,12 @@ TEST(ExceptionOrder) {
void ThrowValue(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
CHECK_EQ(1, args.Length());
- v8::ThrowException(args[0]);
+ args.GetIsolate()->ThrowException(args[0]);
}
THREADED_TEST(ThrowValues) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("Throw"), v8::FunctionTemplate::New(ThrowValue));
LocalContext context(0, templ);
@@ -4885,7 +5024,7 @@ static void TryCatchNestedHelper(int depth) {
CHECK(try_catch.HasCaught());
try_catch.ReThrow();
} else {
- v8::ThrowException(v8_str("back"));
+ CcTest::isolate()->ThrowException(v8_str("back"));
}
}
@@ -4930,7 +5069,7 @@ void TryCatchMixedNestingHelper(
// This exercises the ability of TryCatch.ReThrow() to restore the
// inner pending Message before throwing the exception again.
TEST(TryCatchMixedNesting) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::V8::Initialize();
v8::TryCatch try_catch;
Local<ObjectTemplate> templ = ObjectTemplate::New();
@@ -4962,16 +5101,26 @@ THREADED_TEST(Equality) {
CHECK(!v8_str("5")->StrictEquals(v8_num(5)));
CHECK(v8_num(1)->StrictEquals(v8_num(1)));
CHECK(!v8_num(1)->StrictEquals(v8_num(2)));
- CHECK(v8_num(0)->StrictEquals(v8_num(-0)));
+ CHECK(v8_num(0.0)->StrictEquals(v8_num(-0.0)));
Local<Value> not_a_number = v8_num(i::OS::nan_value());
CHECK(!not_a_number->StrictEquals(not_a_number));
- CHECK(v8::False()->StrictEquals(v8::False()));
- CHECK(!v8::False()->StrictEquals(v8::Undefined()));
+ CHECK(v8::False(isolate)->StrictEquals(v8::False(isolate)));
+ CHECK(!v8::False(isolate)->StrictEquals(v8::Undefined(isolate)));
v8::Handle<v8::Object> obj = v8::Object::New();
v8::Persistent<v8::Object> alias(isolate, obj);
CHECK(v8::Local<v8::Object>::New(isolate, alias)->StrictEquals(obj));
alias.Dispose();
+
+ CHECK(v8_str("a")->SameValue(v8_str("a")));
+ CHECK(!v8_str("a")->SameValue(v8_str("b")));
+ CHECK(!v8_str("5")->SameValue(v8_num(5)));
+ CHECK(v8_num(1)->SameValue(v8_num(1)));
+ CHECK(!v8_num(1)->SameValue(v8_num(2)));
+ CHECK(!v8_num(0.0)->SameValue(v8_num(-0.0)));
+ CHECK(not_a_number->SameValue(not_a_number));
+ CHECK(v8::False(isolate)->SameValue(v8::False(isolate)));
+ CHECK(!v8::False(isolate)->SameValue(v8::Undefined(isolate)));
}
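// SameValue (ES5 9.12) differs from StrictEquals (===) on exactly the two
// numeric edge cases checked above: NaN matches itself, and +0 does not
// match -0. A self-contained sketch of the numeric rule, assuming IEEE-754
// doubles (this mirrors the spec, not V8's implementation):
#include <cmath>

static bool SameValueNumber(double x, double y) {
  if (std::isnan(x) && std::isnan(y)) return true;  // NaN is SameValue NaN
  if (x == y) {
    // +0 == -0 under ===, but SameValue tells them apart by sign bit.
    return std::signbit(x) == std::signbit(y);
  }
  return false;
}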
@@ -5056,7 +5205,7 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
LocalContext context;
@@ -5108,7 +5257,7 @@ static v8::Handle<v8::Object> GetGlobalProperty(LocalContext* context,
THREADED_TEST(DefineAPIAccessorOnObject) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
LocalContext context;
@@ -5182,7 +5331,7 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
LocalContext context;
@@ -5238,7 +5387,7 @@ static void Get239Value(Local<String> name,
THREADED_TEST(ElementAPIAccessor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
LocalContext context;
@@ -5276,7 +5425,7 @@ static void SetXValue(Local<String> name,
THREADED_TEST(SimplePropertyWrite) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetAccessor(v8_str("x"), GetXValue, SetXValue, v8_str("donut"));
LocalContext context;
@@ -5285,7 +5434,7 @@ THREADED_TEST(SimplePropertyWrite) {
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
script->Run();
- CHECK_EQ(v8_num(4), Local<Value>::New(v8::Isolate::GetCurrent(), xValue));
+ CHECK_EQ(v8_num(4), Local<Value>::New(CcTest::isolate(), xValue));
xValue.Dispose();
xValue.Clear();
}
@@ -5293,7 +5442,7 @@ THREADED_TEST(SimplePropertyWrite) {
THREADED_TEST(SetterOnly) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetAccessor(v8_str("x"), NULL, SetXValue, v8_str("donut"));
LocalContext context;
@@ -5302,7 +5451,7 @@ THREADED_TEST(SetterOnly) {
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
script->Run();
- CHECK_EQ(v8_num(4), Local<Value>::New(v8::Isolate::GetCurrent(), xValue));
+ CHECK_EQ(v8_num(4), Local<Value>::New(CcTest::isolate(), xValue));
xValue.Dispose();
xValue.Clear();
}
@@ -5310,7 +5459,7 @@ THREADED_TEST(SetterOnly) {
THREADED_TEST(NoAccessors) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetAccessor(v8_str("x"),
static_cast<v8::AccessorGetterCallback>(NULL),
@@ -5334,7 +5483,7 @@ static void XPropertyGetter(Local<String> property,
THREADED_TEST(NamedInterceptorPropertyRead) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(XPropertyGetter);
LocalContext context;
@@ -5348,7 +5497,7 @@ THREADED_TEST(NamedInterceptorPropertyRead) {
THREADED_TEST(NamedInterceptorDictionaryIC) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(XPropertyGetter);
LocalContext context;
@@ -5378,7 +5527,7 @@ THREADED_TEST(NamedInterceptorDictionaryIC) {
THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<Context> context1 = Context::New(isolate);
@@ -5430,7 +5579,7 @@ static void SetXOnPrototypeGetter(
// This is a regression test for http://crbug.com/20104. Map
// transitions should not interfere with post-interceptor lookup.
THREADED_TEST(NamedInterceptorMapTransitionRead) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<v8::FunctionTemplate> function_template = v8::FunctionTemplate::New();
Local<v8::ObjectTemplate> instance_template
= function_template->InstanceTemplate();
@@ -5467,7 +5616,7 @@ static void IndexedPropertySetter(
THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IndexedPropertyGetter,
IndexedPropertySetter);
@@ -5532,7 +5681,7 @@ void UnboxedDoubleIndexedPropertyEnumerator(
// Make sure that the interceptor code in the runtime properly handles
// merging property name lists for double-array-backed arrays.
THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(UnboxedDoubleIndexedPropertyGetter,
UnboxedDoubleIndexedPropertySetter,
@@ -5588,7 +5737,7 @@ static void NonStrictIndexedPropertyGetter(
// Make sure that the interceptor code in the runtime properly handles
// merging property name lists for non-string arguments arrays.
THREADED_TEST(IndexedInterceptorNonStrictArgsWithIndexedAccessor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(NonStrictIndexedPropertyGetter,
0,
@@ -5614,7 +5763,7 @@ static void IdentityIndexedPropertyGetter(
THREADED_TEST(IndexedInterceptorWithGetOwnPropertyDescriptor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
@@ -5635,7 +5784,7 @@ THREADED_TEST(IndexedInterceptorWithGetOwnPropertyDescriptor) {
THREADED_TEST(IndexedInterceptorWithNoSetter) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
@@ -5658,7 +5807,7 @@ THREADED_TEST(IndexedInterceptorWithNoSetter) {
THREADED_TEST(IndexedInterceptorWithAccessorCheck) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
@@ -5683,7 +5832,7 @@ THREADED_TEST(IndexedInterceptorWithAccessorCheck) {
THREADED_TEST(IndexedInterceptorWithAccessorCheckSwitchedOn) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
@@ -5712,7 +5861,7 @@ THREADED_TEST(IndexedInterceptorWithAccessorCheckSwitchedOn) {
THREADED_TEST(IndexedInterceptorWithDifferentIndices) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
@@ -5735,7 +5884,7 @@ THREADED_TEST(IndexedInterceptorWithDifferentIndices) {
THREADED_TEST(IndexedInterceptorWithNegativeIndices) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
@@ -5774,7 +5923,7 @@ THREADED_TEST(IndexedInterceptorWithNegativeIndices) {
THREADED_TEST(IndexedInterceptorWithNotSmiLookup) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
@@ -5803,7 +5952,7 @@ THREADED_TEST(IndexedInterceptorWithNotSmiLookup) {
THREADED_TEST(IndexedInterceptorGoingMegamorphic) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
@@ -5833,7 +5982,7 @@ THREADED_TEST(IndexedInterceptorGoingMegamorphic) {
THREADED_TEST(IndexedInterceptorReceiverTurningSmi) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
@@ -5863,7 +6012,7 @@ THREADED_TEST(IndexedInterceptorReceiverTurningSmi) {
THREADED_TEST(IndexedInterceptorOnProto) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
@@ -5887,7 +6036,7 @@ THREADED_TEST(IndexedInterceptorOnProto) {
THREADED_TEST(MultiContexts) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("dummy"), v8::FunctionTemplate::New(DummyCallHandler));
@@ -5923,7 +6072,7 @@ THREADED_TEST(FunctionPrototypeAcrossContexts) {
// Make sure that functions created by cloning boilerplates cannot
// communicate through their __proto__ field.
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env0;
v8::Handle<v8::Object> global0 =
@@ -5956,7 +6105,7 @@ THREADED_TEST(Regress892105) {
// to Object.prototype and Array.prototype and create a new
// environment. This should succeed.
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<String> source = v8_str("Object.prototype.obj = 1234;"
"Array.prototype.arr = 4567;"
@@ -6187,7 +6336,7 @@ static void HandleLogDelegator(
THREADED_TEST(GlobalObjectTemplate) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
Local<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(v8_str("JSNI_Log"),
@@ -6205,12 +6354,12 @@ static const char* kSimpleExtensionSource =
THREADED_TEST(SimpleExtensions) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("simpletest", kSimpleExtensionSource));
const char* extension_names[] = { "simpletest" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
CHECK_EQ(result, v8::Integer::New(4));
@@ -6218,12 +6367,12 @@ THREADED_TEST(SimpleExtensions) {
THREADED_TEST(NullExtensions) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("nulltest", NULL));
const char* extension_names[] = { "nulltest" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("1+3"))->Run();
CHECK_EQ(result, v8::Integer::New(4));
@@ -6237,13 +6386,13 @@ static const int kEmbeddedExtensionSourceValidLen = 34;
THREADED_TEST(ExtensionMissingSourceLength) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("srclentest_fail",
kEmbeddedExtensionSource));
const char* extension_names[] = { "srclentest_fail" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
CHECK_EQ(0, *context);
}
@@ -6251,7 +6400,7 @@ THREADED_TEST(ExtensionMissingSourceLength) {
THREADED_TEST(ExtensionWithSourceLength) {
for (int source_len = kEmbeddedExtensionSourceValidLen - 1;
source_len <= kEmbeddedExtensionSourceValidLen + 1; ++source_len) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
i::ScopedVector<char> extension_name(32);
i::OS::SNPrintF(extension_name, "ext #%d", source_len);
v8::RegisterExtension(new Extension(extension_name.start(),
@@ -6260,7 +6409,7 @@ THREADED_TEST(ExtensionWithSourceLength) {
const char* extension_names[1] = { extension_name.start() };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
if (source_len == kEmbeddedExtensionSourceValidLen) {
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("Ret54321()"))->Run();
@@ -6291,13 +6440,13 @@ static const char* kEvalExtensionSource2 =
THREADED_TEST(UseEvalFromExtension) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("evaltest1", kEvalExtensionSource1));
v8::RegisterExtension(new Extension("evaltest2", kEvalExtensionSource2));
const char* extension_names[] = { "evaltest1", "evaltest2" };
v8::ExtensionConfiguration extensions(2, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("UseEval1()"))->Run();
CHECK_EQ(result, v8::Integer::New(42));
@@ -6325,13 +6474,13 @@ static const char* kWithExtensionSource2 =
THREADED_TEST(UseWithFromExtension) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("withtest1", kWithExtensionSource1));
v8::RegisterExtension(new Extension("withtest2", kWithExtensionSource2));
const char* extension_names[] = { "withtest1", "withtest2" };
v8::ExtensionConfiguration extensions(2, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("UseWith1()"))->Run();
CHECK_EQ(result, v8::Integer::New(87));
@@ -6341,12 +6490,12 @@ THREADED_TEST(UseWithFromExtension) {
THREADED_TEST(AutoExtensions) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
Extension* extension = new Extension("autotest", kSimpleExtensionSource);
extension->set_auto_enable(true);
v8::RegisterExtension(extension);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent());
+ Context::New(CcTest::isolate());
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
CHECK_EQ(result, v8::Integer::New(4));
@@ -6360,13 +6509,13 @@ static const char* kSyntaxErrorInExtensionSource =
// Test that a syntax error in an extension does not cause a fatal
// error but results in an empty context.
THREADED_TEST(SyntaxErrorExtensions) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("syntaxerror",
kSyntaxErrorInExtensionSource));
const char* extension_names[] = { "syntaxerror" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
@@ -6378,13 +6527,13 @@ static const char* kExceptionInExtensionSource =
// Test that an exception when installing an extension does not cause
// a fatal error but results in an empty context.
THREADED_TEST(ExceptionExtensions) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("exception",
kExceptionInExtensionSource));
const char* extension_names[] = { "exception" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
@@ -6400,13 +6549,13 @@ static const char* kNativeCallTest =
// Test that native runtime calls are supported in extensions.
THREADED_TEST(NativeCallInExtensions) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::RegisterExtension(new Extension("nativecall",
kNativeCallInExtensionSource));
const char* extension_names[] = { "nativecall" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str(kNativeCallTest))->Run();
CHECK_EQ(result, v8::Integer::New(3));
@@ -6435,14 +6584,14 @@ class NativeFunctionExtension : public Extension {
THREADED_TEST(NativeFunctionDeclaration) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedecl";
v8::RegisterExtension(new NativeFunctionExtension(name,
"native function foo();"));
const char* extension_names[] = { name };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("foo(42);"))->Run();
CHECK_EQ(result, v8::Integer::New(42));
@@ -6450,7 +6599,7 @@ THREADED_TEST(NativeFunctionDeclaration) {
THREADED_TEST(NativeFunctionDeclarationError) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedeclerr";
// Syntax error in extension code.
v8::RegisterExtension(new NativeFunctionExtension(name,
@@ -6458,13 +6607,13 @@ THREADED_TEST(NativeFunctionDeclarationError) {
const char* extension_names[] = { name };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
THREADED_TEST(NativeFunctionDeclarationErrorEscape) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
const char* name = "nativedeclerresc";
// Syntax error in extension code - escape code in "native" means that
// it's not treated as a keyword.
@@ -6474,13 +6623,13 @@ THREADED_TEST(NativeFunctionDeclarationErrorEscape) {
const char* extension_names[] = { name };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
static void CheckDependencies(const char* name, const char* expected) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::ExtensionConfiguration config(1, &name);
LocalContext context(&config);
CHECK_EQ(String::New(expected), context->Global()->Get(v8_str("loaded")));
@@ -6508,7 +6657,7 @@ THREADED_TEST(ExtensionDependency) {
CheckDependencies("C", "undefinedAC");
CheckDependencies("D", "undefinedABCD");
CheckDependencies("E", "undefinedABCDE");
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
static const char* exts[2] = { "C", "E" };
v8::ExtensionConfiguration config(2, exts);
LocalContext context(&config);
@@ -6564,7 +6713,7 @@ v8::Handle<v8::FunctionTemplate> FunctionExtension::GetNativeFunction(
THREADED_TEST(FunctionLookup) {
v8::RegisterExtension(new FunctionExtension());
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
static const char* exts[1] = { "functiontest" };
v8::ExtensionConfiguration config(1, exts);
LocalContext context(&config);
@@ -6577,7 +6726,7 @@ THREADED_TEST(FunctionLookup) {
THREADED_TEST(NativeFunctionConstructCall) {
v8::RegisterExtension(new FunctionExtension());
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
static const char* exts[1] = { "functiontest" };
v8::ExtensionConfiguration config(1, exts);
LocalContext context(&config);
@@ -6616,7 +6765,7 @@ TEST(ErrorReporting) {
last_location = NULL;
v8::ExtensionConfiguration config(1, bDeps);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &config);
+ Context::New(CcTest::isolate(), &config);
CHECK(context.IsEmpty());
CHECK_NE(last_location, NULL);
}
@@ -6637,7 +6786,7 @@ void OOMCallback(const char* location, const char* message) {
TEST(RegexpOutOfMemory) {
// Execute a script that causes out of memory when flattening a string.
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::V8::SetFatalErrorHandler(OOMCallback);
LocalContext context;
Local<Script> script =
@@ -6652,7 +6801,7 @@ TEST(RegexpOutOfMemory) {
static void MissingScriptInfoMessageListener(v8::Handle<v8::Message> message,
v8::Handle<Value> data) {
CHECK(message->GetScriptResourceName()->IsUndefined());
- CHECK_EQ(v8::Undefined(), message->GetScriptResourceName());
+ CHECK_EQ(v8::Undefined(CcTest::isolate()), message->GetScriptResourceName());
message->GetLineNumber();
message->GetSourceLine();
}
@@ -6720,16 +6869,16 @@ void WhammyPropertyGetter(Local<String> name,
THREADED_TEST(WeakReference) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
- Whammy* whammy = new Whammy(v8::Isolate::GetCurrent());
+ Whammy* whammy = new Whammy(CcTest::isolate());
templ->SetNamedPropertyHandler(WhammyPropertyGetter,
0, 0, 0, 0,
v8::External::New(whammy));
const char* extension_list[] = { "v8/gc" };
v8::ExtensionConfiguration extensions(1, extension_list);
v8::Handle<Context> context =
- Context::New(v8::Isolate::GetCurrent(), &extensions);
+ Context::New(CcTest::isolate(), &extensions);
Context::Scope context_scope(context);
v8::Handle<v8::Object> interceptor = templ->NewInstance();
@@ -6758,7 +6907,7 @@ static void DisposeAndSetFlag(v8::Isolate* isolate,
THREADED_TEST(IndependentWeakHandle) {
- v8::Isolate* iso = v8::Isolate::GetCurrent();
+ v8::Isolate* iso = CcTest::isolate();
v8::HandleScope scope(iso);
v8::Handle<Context> context = Context::New(iso);
Context::Scope context_scope(context);
@@ -6779,19 +6928,19 @@ THREADED_TEST(IndependentWeakHandle) {
object_a.MarkIndependent();
object_b.MarkIndependent();
CHECK(object_b.IsIndependent());
- HEAP->PerformScavenge();
+ CcTest::heap()->PerformScavenge();
CHECK(object_a_disposed);
CHECK(object_b_disposed);
}
static void InvokeScavenge() {
- HEAP->PerformScavenge();
+ CcTest::heap()->PerformScavenge();
}
static void InvokeMarkSweep() {
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
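// The two helpers above choose which collector a weak callback triggers:
// PerformScavenge() runs only a young-generation collection, which is
// already enough to reclaim handles marked independent, while
// CollectAllGarbage(kNoGCFlags) performs a full mark-sweep of the heap.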
@@ -6814,7 +6963,7 @@ static void ForceMarkSweep(v8::Isolate* isolate,
THREADED_TEST(GCFromWeakCallbacks) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Handle<Context> context = Context::New(isolate);
Context::Scope context_scope(context);
@@ -6853,7 +7002,7 @@ static void RevivingCallback(v8::Isolate* isolate,
THREADED_TEST(IndependentHandleRevival) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Handle<Context> context = Context::New(isolate);
Context::Scope context_scope(context);
@@ -6870,9 +7019,9 @@ THREADED_TEST(IndependentHandleRevival) {
bool revived = false;
object.MakeWeak(&revived, &RevivingCallback);
object.MarkIndependent();
- HEAP->PerformScavenge();
+ CcTest::heap()->PerformScavenge();
CHECK(revived);
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
{
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Object> o = v8::Local<v8::Object>::New(isolate, object);
@@ -6889,19 +7038,20 @@ v8::Handle<Function> args_fun;
static void ArgumentsTestCallback(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
+ v8::Isolate* isolate = args.GetIsolate();
CHECK_EQ(args_fun, args.Callee());
CHECK_EQ(3, args.Length());
- CHECK_EQ(v8::Integer::New(1), args[0]);
- CHECK_EQ(v8::Integer::New(2), args[1]);
- CHECK_EQ(v8::Integer::New(3), args[2]);
- CHECK_EQ(v8::Undefined(), args[3]);
+ CHECK_EQ(v8::Integer::New(1, isolate), args[0]);
+ CHECK_EQ(v8::Integer::New(2, isolate), args[1]);
+ CHECK_EQ(v8::Integer::New(3, isolate), args[2]);
+ CHECK_EQ(v8::Undefined(isolate), args[3]);
v8::HandleScope scope(args.GetIsolate());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
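// Note the bounds behavior the callback above relies on: with
// args.Length() == 3, reading args[3] is still well-defined and yields
// undefined instead of crashing, so tests can probe past the end of the
// argument list.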
THREADED_TEST(Arguments) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> global = ObjectTemplate::New();
global->Set(v8_str("f"), v8::FunctionTemplate::New(ArgumentsTestCallback));
LocalContext context(NULL, global);
@@ -6941,7 +7091,7 @@ static void IDeleter(uint32_t index,
THREADED_TEST(Deleter) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
obj->SetNamedPropertyHandler(NoBlockGetterX, NULL, NULL, PDeleter, NULL);
obj->SetIndexedPropertyHandler(NoBlockGetterI, NULL, NULL, IDeleter, NULL);
@@ -7004,7 +7154,7 @@ static void IndexedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
THREADED_TEST(Enumerators) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
obj->SetNamedPropertyHandler(GetK, NULL, NULL, NULL, NamedEnum);
obj->SetIndexedPropertyHandler(IndexedGetK, NULL, NULL, NULL, IndexedEnum);
@@ -7068,7 +7218,8 @@ static void PGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
p_getter_count++;
- v8::Handle<v8::Object> global = Context::GetCurrent()->Global();
+ v8::Handle<v8::Object> global =
+ info.GetIsolate()->GetCurrentContext()->Global();
CHECK_EQ(info.Holder(), global->Get(v8_str("o1")));
if (name->Equals(v8_str("p1"))) {
CHECK_EQ(info.This(), global->Get(v8_str("o1")));
@@ -7102,7 +7253,8 @@ static void PGetter2(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
p_getter_count2++;
- v8::Handle<v8::Object> global = Context::GetCurrent()->Global();
+ v8::Handle<v8::Object> global =
+ info.GetIsolate()->GetCurrentContext()->Global();
CHECK_EQ(info.Holder(), global->Get(v8_str("o1")));
if (name->Equals(v8_str("p1"))) {
CHECK_EQ(info.This(), global->Get(v8_str("o1")));
@@ -7117,7 +7269,7 @@ static void PGetter2(Local<String> name,
THREADED_TEST(GetterHolders) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
obj->SetAccessor(v8_str("p1"), PGetter);
obj->SetAccessor(v8_str("p2"), PGetter);
@@ -7130,7 +7282,7 @@ THREADED_TEST(GetterHolders) {
THREADED_TEST(PreInterceptorHolders) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
obj->SetNamedPropertyHandler(PGetter2);
p_getter_count2 = 0;
@@ -7140,19 +7292,20 @@ THREADED_TEST(PreInterceptorHolders) {
THREADED_TEST(ObjectInstantiation) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetAccessor(v8_str("t"), PGetter2);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
for (int i = 0; i < 100; i++) {
- v8::HandleScope inner_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope inner_scope(CcTest::isolate());
v8::Handle<v8::Object> obj = templ->NewInstance();
CHECK_NE(obj, context->Global()->Get(v8_str("o")));
context->Global()->Set(v8_str("o2"), obj);
v8::Handle<Value> value =
Script::Compile(v8_str("o.__proto__ === o2.__proto__"))->Run();
- CHECK_EQ(v8::True(), value);
+ CHECK_EQ(v8::True(isolate), value);
context->Global()->Set(v8_str("o"), obj);
}
}
@@ -7208,7 +7361,7 @@ THREADED_TEST(StringWrite) {
"for (var i = 0; i < 0xd800; i += 4) {"
" right = String.fromCharCode(i) + right;"
"}");
- v8::Handle<v8::Object> global = Context::GetCurrent()->Global();
+ v8::Handle<v8::Object> global = context->Global();
Handle<String> left_tree = global->Get(v8_str("left")).As<String>();
Handle<String> right_tree = global->Get(v8_str("right")).As<String>();
@@ -7724,7 +7877,7 @@ static void YSetter(Local<String> name,
THREADED_TEST(DeleteAccessor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
obj->SetAccessor(v8_str("y"), YGetter, YSetter);
LocalContext context;
@@ -7737,7 +7890,7 @@ THREADED_TEST(DeleteAccessor) {
THREADED_TEST(TypeSwitch) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> templ1 = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> templ2 = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> templ3 = v8::FunctionTemplate::New();
@@ -7795,7 +7948,8 @@ static void TroubleCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
trouble_nesting++;
// Call a JS function that throws an uncaught exception.
- Local<v8::Object> arg_this = Context::GetCurrent()->Global();
+ Local<v8::Object> arg_this =
+ args.GetIsolate()->GetCurrentContext()->Global();
Local<Value> trouble_callee = (trouble_nesting == 3) ?
arg_this->Get(v8_str("trouble_callee")) :
arg_this->Get(v8_str("trouble_caller"));
@@ -7907,13 +8061,13 @@ TEST(TryCatchFinallyUsingTryCatchHandler) {
// SecurityHandler can't be run twice
TEST(SecurityHandler) {
- v8::HandleScope scope0(v8::Isolate::GetCurrent());
+ v8::HandleScope scope0(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->SetAccessCheckCallbacks(NamedSecurityTestCallback,
IndexedSecurityTestCallback);
// Create an environment
v8::Handle<Context> context0 =
- Context::New(v8::Isolate::GetCurrent(), NULL, global_template);
+ Context::New(CcTest::isolate(), NULL, global_template);
context0->Enter();
v8::Handle<v8::Object> global0 = context0->Global();
@@ -7926,10 +8080,10 @@ TEST(SecurityHandler) {
CHECK_EQ(999, z0->Int32Value());
// Create another environment, should fail security checks.
- v8::HandleScope scope1(v8::Isolate::GetCurrent());
+ v8::HandleScope scope1(CcTest::isolate());
v8::Handle<Context> context1 =
- Context::New(v8::Isolate::GetCurrent(), NULL, global_template);
+ Context::New(CcTest::isolate(), NULL, global_template);
context1->Enter();
v8::Handle<v8::Object> global1 = context1->Global();
@@ -7947,7 +8101,7 @@ TEST(SecurityHandler) {
// Create another environment, should pass security checks.
{ g_security_callback_result = true; // allow security handler to pass.
- v8::HandleScope scope2(v8::Isolate::GetCurrent());
+ v8::HandleScope scope2(CcTest::isolate());
LocalContext context2;
v8::Handle<v8::Object> global2 = context2->Global();
global2->Set(v8_str("othercontext"), global0);
@@ -8317,7 +8471,7 @@ static bool NamedAccessBlocker(Local<v8::Object> global,
Local<Value> name,
v8::AccessType type,
Local<Value> data) {
- return Context::GetCurrent()->Global()->Equals(global) ||
+ return CcTest::isolate()->GetCurrentContext()->Global()->Equals(global) ||
allowed_access_type[type];
}
@@ -8326,7 +8480,7 @@ static bool IndexedAccessBlocker(Local<v8::Object> global,
uint32_t key,
v8::AccessType type,
Local<Value> data) {
- return Context::GetCurrent()->Global()->Equals(global) ||
+ return CcTest::isolate()->GetCurrentContext()->Global()->Equals(global) ||
allowed_access_type[type];
}
@@ -8383,7 +8537,7 @@ static void UnreachableFunction(
TEST(AccessControl) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
@@ -8662,7 +8816,7 @@ TEST(AccessControl) {
TEST(AccessControlES5) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
@@ -8749,7 +8903,7 @@ static bool GetOwnPropertyNamesIndexedBlocker(Local<v8::Object> global,
THREADED_TEST(AccessControlGetOwnPropertyNames) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New();
@@ -8763,7 +8917,7 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
v8::Handle<v8::Object> global0 = context0->Global();
- v8::HandleScope scope1(v8::Isolate::GetCurrent());
+ v8::HandleScope scope1(CcTest::isolate());
v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
@@ -8809,7 +8963,7 @@ static void NamedPropertyEnumerator(
THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New();
obj_template->Set(v8_str("7"), v8::Integer::New(7));
@@ -8844,7 +8998,7 @@ static void ConstTenGetter(Local<String> name,
THREADED_TEST(CrossDomainAccessors) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
@@ -8875,7 +9029,7 @@ THREADED_TEST(CrossDomainAccessors) {
global->Set(v8_str("accessible"), v8_num(11));
// Enter a new context.
- v8::HandleScope scope1(v8::Isolate::GetCurrent());
+ v8::HandleScope scope1(CcTest::isolate());
v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
@@ -8921,7 +9075,7 @@ TEST(AccessControlIC) {
named_access_count = 0;
indexed_access_count = 0;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
// Create an environment.
@@ -9069,7 +9223,7 @@ THREADED_TEST(AccessControlFlatten) {
named_access_count = 0;
indexed_access_count = 0;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
// Create an environment.
@@ -9137,7 +9291,7 @@ THREADED_TEST(AccessControlInterceptorIC) {
named_access_count = 0;
indexed_access_count = 0;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
// Create an environment.
@@ -9241,7 +9395,7 @@ static void GlobalObjectInstancePropertiesGet(
THREADED_TEST(GlobalObjectInstanceProperties) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
Local<Value> global_object;
@@ -9297,7 +9451,7 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
THREADED_TEST(CallKnownGlobalReceiver) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
Local<Value> global_object;
@@ -9376,7 +9530,7 @@ static void ShadowNamedGet(Local<String> key,
THREADED_TEST(ShadowObject) {
shadow_y = shadow_y_setter_call_count = shadow_y_getter_call_count = 0;
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope handle_scope(CcTest::isolate());
Local<ObjectTemplate> global_template = v8::ObjectTemplate::New();
LocalContext context(NULL, global_template);
@@ -9676,7 +9830,7 @@ THREADED_TEST(SetPrototypeThrows) {
v8::TryCatch try_catch;
CHECK(!o1->SetPrototype(o0));
CHECK(!try_catch.HasCaught());
- ASSERT(!i::Isolate::Current()->has_pending_exception());
+ ASSERT(!CcTest::i_isolate()->has_pending_exception());
CHECK_EQ(42, CompileRun("function f() { return 42; }; f()")->Int32Value());
}
@@ -9770,7 +9924,8 @@ static void FakeConstructorCallback(
THREADED_TEST(ConstructorForObject) {
LocalContext context;
- v8::HandleScope handle_scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
{ Local<ObjectTemplate> instance_template = ObjectTemplate::New();
instance_template->SetCallAsFunctionHandler(ConstructorCallback);
@@ -9819,7 +9974,7 @@ THREADED_TEST(ConstructorForObject) {
CHECK(value->IsBoolean());
CHECK_EQ(true, value->BooleanValue());
- Handle<Value> args3[] = { v8::True() };
+ Handle<Value> args3[] = { v8::True(isolate) };
Local<Value> value_obj3 = instance->CallAsConstructor(1, args3);
CHECK(value_obj3->IsObject());
Local<Object> object3 = Local<Object>::Cast(value_obj3);
@@ -9829,7 +9984,7 @@ THREADED_TEST(ConstructorForObject) {
CHECK_EQ(true, value->BooleanValue());
// Call the Object's constructor with undefined.
- Handle<Value> args4[] = { v8::Undefined() };
+ Handle<Value> args4[] = { v8::Undefined(isolate) };
Local<Value> value_obj4 = instance->CallAsConstructor(1, args4);
CHECK(value_obj4->IsObject());
Local<Object> object4 = Local<Object>::Cast(value_obj4);
@@ -9838,7 +9993,7 @@ THREADED_TEST(ConstructorForObject) {
CHECK(value->IsUndefined());
// Call the Object's constructor with null.
- Handle<Value> args5[] = { v8::Null() };
+ Handle<Value> args5[] = { v8::Null(isolate) };
Local<Value> value_obj5 = instance->CallAsConstructor(1, args5);
CHECK(value_obj5->IsObject());
Local<Object> object5 = Local<Object>::Cast(value_obj5);
@@ -9997,7 +10152,7 @@ THREADED_TEST(EvalAliasedDynamic) {
THREADED_TEST(CrossEval) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext other;
LocalContext current;
@@ -10080,7 +10235,7 @@ THREADED_TEST(CrossEval) {
// its global throws an exception. This behavior is consistent with
// other JavaScript implementations.
THREADED_TEST(EvalInDetachedGlobal) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<Context> context0 = Context::New(isolate);
@@ -10113,7 +10268,7 @@ THREADED_TEST(EvalInDetachedGlobal) {
THREADED_TEST(CrossLazyLoad) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext other;
LocalContext current;
@@ -10145,6 +10300,11 @@ static void call_as_function(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
+static void ReturnThis(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args.GetReturnValue().Set(args.This());
+}
+
+
// Test that a call handler can be set for objects which will allow
// non-function objects created through the API to be called as
// functions.
@@ -10257,6 +10417,81 @@ THREADED_TEST(CallAsFunction) {
CHECK_EQ("23", *exception_value2);
try_catch.Reset();
}
+
+ { v8::Isolate* isolate = context->GetIsolate();
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ Local<ObjectTemplate> instance_template = t->InstanceTemplate();
+ instance_template->SetCallAsFunctionHandler(ReturnThis);
+ Local<v8::Object> instance = t->GetFunction()->NewInstance();
+
+ Local<v8::Value> a1 =
+ instance->CallAsFunction(v8::Undefined(isolate), 0, NULL);
+ CHECK(a1->StrictEquals(instance));
+ Local<v8::Value> a2 =
+ instance->CallAsFunction(v8::Null(isolate), 0, NULL);
+ CHECK(a2->StrictEquals(instance));
+ Local<v8::Value> a3 =
+ instance->CallAsFunction(v8_num(42), 0, NULL);
+ CHECK(a3->StrictEquals(instance));
+ Local<v8::Value> a4 =
+ instance->CallAsFunction(v8_str("hello"), 0, NULL);
+ CHECK(a4->StrictEquals(instance));
+ Local<v8::Value> a5 =
+ instance->CallAsFunction(v8::True(isolate), 0, NULL);
+ CHECK(a5->StrictEquals(instance));
+ }
+
+ { v8::Isolate* isolate = context->GetIsolate();
+ CompileRun(
+ "function ReturnThisSloppy() {"
+ " return this;"
+ "}"
+ "function ReturnThisStrict() {"
+ " 'use strict';"
+ " return this;"
+ "}");
+ Local<Function> ReturnThisSloppy =
+ Local<Function>::Cast(
+ context->Global()->Get(v8_str("ReturnThisSloppy")));
+ Local<Function> ReturnThisStrict =
+ Local<Function>::Cast(
+ context->Global()->Get(v8_str("ReturnThisStrict")));
+
+ Local<v8::Value> a1 =
+ ReturnThisSloppy->CallAsFunction(v8::Undefined(isolate), 0, NULL);
+ CHECK(a1->StrictEquals(context->Global()));
+ Local<v8::Value> a2 =
+ ReturnThisSloppy->CallAsFunction(v8::Null(isolate), 0, NULL);
+ CHECK(a2->StrictEquals(context->Global()));
+ Local<v8::Value> a3 =
+ ReturnThisSloppy->CallAsFunction(v8_num(42), 0, NULL);
+ CHECK(a3->IsNumberObject());
+ CHECK_EQ(42.0, a3.As<v8::NumberObject>()->ValueOf());
+ Local<v8::Value> a4 =
+ ReturnThisSloppy->CallAsFunction(v8_str("hello"), 0, NULL);
+ CHECK(a4->IsStringObject());
+ CHECK(a4.As<v8::StringObject>()->ValueOf()->StrictEquals(v8_str("hello")));
+ Local<v8::Value> a5 =
+ ReturnThisSloppy->CallAsFunction(v8::True(isolate), 0, NULL);
+ CHECK(a5->IsBooleanObject());
+ CHECK(a5.As<v8::BooleanObject>()->ValueOf());
+
+ Local<v8::Value> a6 =
+ ReturnThisStrict->CallAsFunction(v8::Undefined(isolate), 0, NULL);
+ CHECK(a6->IsUndefined());
+ Local<v8::Value> a7 =
+ ReturnThisStrict->CallAsFunction(v8::Null(isolate), 0, NULL);
+ CHECK(a7->IsNull());
+ Local<v8::Value> a8 =
+ ReturnThisStrict->CallAsFunction(v8_num(42), 0, NULL);
+ CHECK(a8->StrictEquals(v8_num(42)));
+ Local<v8::Value> a9 =
+ ReturnThisStrict->CallAsFunction(v8_str("hello"), 0, NULL);
+ CHECK(a9->StrictEquals(v8_str("hello")));
+ Local<v8::Value> a10 =
+ ReturnThisStrict->CallAsFunction(v8::True(isolate), 0, NULL);
+ CHECK(a10->StrictEquals(v8::True(isolate)));
+ }
}
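The new assertions pin down receiver coercion for CallAsFunction: a call-as-function handler always sees the object itself as this, a sloppy-mode function gets undefined/null replaced by the global object and primitives boxed (NumberObject, StringObject, BooleanObject), and a strict-mode function receives the value unchanged. The same semantics in script form, as a hypothetical condensed check:

    CompileRun(
        "function sloppy() { return this; }"
        "function strict() { 'use strict'; return this; }"
        // sloppy.call(42) yields a boxed Number object;
        // strict.call(42) yields the primitive 42 itself.
        "sloppy.call(42) instanceof Number && strict.call(42) === 42");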
@@ -10309,7 +10544,7 @@ static int CountHandles() {
static int Recurse(int depth, int iterations) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
if (depth == 0) return CountHandles();
for (int i = 0; i < iterations; i++) {
Local<v8::Number> n(v8::Integer::New(42));
@@ -10323,7 +10558,7 @@ THREADED_TEST(HandleIteration) {
static const int kNesting = 200;
CHECK_EQ(0, CountHandles());
{
- v8::HandleScope scope1(v8::Isolate::GetCurrent());
+ v8::HandleScope scope1(CcTest::isolate());
CHECK_EQ(0, CountHandles());
for (int i = 0; i < kIterations; i++) {
Local<v8::Number> n(v8::Integer::New(42));
@@ -10332,7 +10567,7 @@ THREADED_TEST(HandleIteration) {
CHECK_EQ(kIterations, CountHandles());
{
- v8::HandleScope scope2(v8::Isolate::GetCurrent());
+ v8::HandleScope scope2(CcTest::isolate());
for (int j = 0; j < kIterations; j++) {
Local<v8::Number> n(v8::Integer::New(42));
CHECK_EQ(j + 1 + kIterations, CountHandles());
@@ -10379,7 +10614,7 @@ static void InterceptorHasOwnPropertyGetterGC(
Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
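The HEAP macro expanded to i::Isolate::Current()->heap(), another hidden dependency on a current isolate; the replacement reads the heap off the test isolate instead. Sketch, assuming the CcTest::heap() helper used throughout this file:

    i::Heap* heap = CcTest::heap();               // was: HEAP
    heap->CollectAllGarbage(i::Heap::kNoGCFlags);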
@@ -10420,7 +10655,7 @@ typedef void (*NamedPropertyGetter)(
static void CheckInterceptorLoadIC(NamedPropertyGetter getter,
const char* source,
int expected) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(getter, 0, 0, 0, 0, v8_str("data"));
LocalContext context;
@@ -10434,7 +10669,7 @@ static void InterceptorLoadICGetter(
Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, info.GetIsolate());
CHECK_EQ(v8_str("data"), info.Data());
CHECK_EQ(v8_str("x"), name);
@@ -10633,7 +10868,7 @@ static void SetOnThis(Local<String> name,
THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
templ->SetAccessor(v8_str("y"), Return239Callback);
@@ -10662,7 +10897,7 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
@@ -10695,7 +10930,7 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
templ->SetAccessor(v8_str("y"), Return239Callback);
@@ -10723,7 +10958,7 @@ THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
// Test the case when we stored a callback into
// a stub, but the interceptor produced the value on its own.

THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
@@ -10751,7 +10986,7 @@ THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
// Test the case when we stored a callback into
// a stub, but it got invalidated later on.
THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
@@ -10783,7 +11018,7 @@ THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
// a stub, but it got invalidated later on due to an override on the global
// object between the interceptor's and the callbacks' holders.
THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
@@ -10838,7 +11073,7 @@ static void InterceptorStoreICSetter(
// This test should hit the store IC for the interceptor case.
THREADED_TEST(InterceptorStoreIC) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorLoadICGetter,
InterceptorStoreICSetter,
@@ -10853,7 +11088,7 @@ THREADED_TEST(InterceptorStoreIC) {
THREADED_TEST(InterceptorStoreICWithNoSetter) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
LocalContext context;
@@ -10884,7 +11119,7 @@ static void InterceptorCallICGetter(
// This test should hit the call IC for the interceptor case.
THREADED_TEST(InterceptorCallIC) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorCallICGetter);
LocalContext context;
@@ -10903,7 +11138,7 @@ THREADED_TEST(InterceptorCallIC) {
// This test checks that if the interceptor doesn't provide
// a value, we can fetch the regular value.
THREADED_TEST(InterceptorCallICSeesOthers) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
@@ -10932,7 +11167,7 @@ static void InterceptorCallICGetter4(
// even if we cached the shadowed variant, the interceptor's function
// is invoked
THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorCallICGetter4);
LocalContext context;
@@ -10952,7 +11187,7 @@ THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
// Test the case when we stored a cacheable lookup into
// a stub, but it got invalidated later on
THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
@@ -10979,7 +11214,7 @@ THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
// This test checks that if the interceptor doesn't provide a function,
// the cached constant function is used
THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
@@ -11010,7 +11245,7 @@ static void InterceptorCallICGetter5(
// even if we cached the constant function, the interceptor's function
// is invoked
THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorCallICGetter5);
LocalContext context;
@@ -11043,7 +11278,7 @@ static void InterceptorCallICGetter6(
// to test the optimized compiler.
THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorCallICGetter6);
LocalContext context;
@@ -11073,7 +11308,7 @@ THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
// Test the case when we stored a constant function into
// a stub, but it got invalidated later on
THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
@@ -11103,7 +11338,7 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
// a stub, but it got invalidated later on due to an override on the global
// object between the interceptor's and the constant function's holders.
THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
@@ -11128,7 +11363,7 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
// Test the case when the actual function to call sits on the global object.
THREADED_TEST(InterceptorCallICCachedFromGlobal) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
templ_o->SetNamedPropertyHandler(NoBlockGetterX);
@@ -11163,7 +11398,7 @@ static void InterceptorCallICFastApi(
reinterpret_cast<int*>(v8::External::Cast(*info.Data())->Value());
++(*call_count);
if ((*call_count) % 20 == 0) {
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
}
@@ -11171,7 +11406,7 @@ static void FastApiCallback_TrivialSignature(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
CheckReturnValue(args, FUNCTION_ADDR(FastApiCallback_TrivialSignature));
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, args.GetIsolate());
CHECK_EQ(args.This(), args.Holder());
CHECK(args.Data()->Equals(v8_str("method_data")));
@@ -11182,7 +11417,7 @@ static void FastApiCallback_SimpleSignature(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
CheckReturnValue(args, FUNCTION_ADDR(FastApiCallback_SimpleSignature));
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, args.GetIsolate());
CHECK_EQ(args.This()->GetPrototype(), args.Holder());
CHECK(args.Data()->Equals(v8_str("method_data")));
@@ -11207,7 +11442,7 @@ static void GenerateSomeGarbage() {
void DirectApiCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
static int count = 0;
if (count++ % 3 == 0) {
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
// This should move the stub
GenerateSomeGarbage(); // This should ensure the old stub memory is flushed
}
@@ -11235,7 +11470,7 @@ THREADED_TEST(CallICFastApi_DirectCall_GCMoveStub) {
void ThrowingDirectApiCallback(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::ThrowException(v8_str("g"));
+ args.GetIsolate()->ThrowException(v8_str("g"));
}
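Throwing also moves from the static v8::ThrowException() to a method on the isolate, which every callback can reach through its args/info object. A minimal sketch of both callback flavors, using the v8_str helper from this file:

    void ThrowFromFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
      args.GetIsolate()->ThrowException(v8_str("g"));  // was: v8::ThrowException
    }
    void ThrowFromAccessor(v8::Local<v8::String> name,
                           const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetIsolate()->ThrowException(v8_str("g"));
    }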
@@ -11262,7 +11497,7 @@ THREADED_TEST(CallICFastApi_DirectCall_Throw) {
static Handle<Value> DoDirectGetter() {
if (++p_getter_count % 3 == 0) {
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
GenerateSomeGarbage();
}
return v8_str("Direct Getter Result");
@@ -11303,7 +11538,7 @@ THREADED_PROFILED_TEST(LoadICFastApi_DirectCall_GCMoveStub) {
void ThrowingDirectGetterCallback(
Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- v8::ThrowException(v8_str("g"));
+ info.GetIsolate()->ThrowException(v8_str("g"));
}
@@ -11325,7 +11560,7 @@ THREADED_TEST(LoadICFastApi_DirectCall_Throw) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_TrivialSignature) {
int interceptor_call_count = 0;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> method_templ =
v8::FunctionTemplate::New(FastApiCallback_TrivialSignature,
@@ -11353,7 +11588,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_TrivialSignature) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature) {
int interceptor_call_count = 0;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> method_templ =
v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
@@ -11385,7 +11620,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
int interceptor_call_count = 0;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> method_templ =
v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
@@ -11423,7 +11658,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
int interceptor_call_count = 0;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> method_templ =
v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
@@ -11461,7 +11696,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
int interceptor_call_count = 0;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> method_templ =
v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
@@ -11502,7 +11737,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
int interceptor_call_count = 0;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> method_templ =
v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
@@ -11542,7 +11777,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
THREADED_PROFILED_TEST(CallICFastApi_TrivialSignature) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> method_templ =
v8::FunctionTemplate::New(FastApiCallback_TrivialSignature,
@@ -11567,7 +11802,7 @@ THREADED_PROFILED_TEST(CallICFastApi_TrivialSignature) {
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> method_templ =
v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
@@ -11596,7 +11831,7 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature) {
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss1) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> method_templ =
v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
@@ -11630,7 +11865,7 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss1) {
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss2) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> method_templ =
v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
@@ -11667,7 +11902,7 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss2) {
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_TypeError) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
v8::Handle<v8::FunctionTemplate> method_templ =
v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
@@ -11718,7 +11953,7 @@ static void InterceptorKeyedCallICGetter(
// Test the case when we stored a cacheable lookup into
// a stub, but the function name changed (to another cacheable function).
THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
@@ -11742,7 +11977,7 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
// a stub, but the function name changed (and the new function is present
// both before and after the interceptor in the prototype chain).
THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorKeyedCallICGetter);
LocalContext context;
@@ -11769,7 +12004,7 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
// Same as InterceptorKeyedCallICKeyChange1, only the cacheable function sits
// on the global object.
THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
@@ -11794,7 +12029,7 @@ THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
// Test the case when the actual function to call sits on the global object.
THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
templ_o->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
@@ -11819,7 +12054,7 @@ THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
// Test the map transition before the interceptor.
THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
templ_o->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
@@ -11841,7 +12076,7 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
// Test the map transition after the interceptor.
THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
templ_o->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
@@ -11877,7 +12112,7 @@ static void InterceptorICRefErrorGetter(
// Once in a while, the interceptor will reply that a property was not
// found, in which case we should get a reference error.
THREADED_TEST(InterceptorICReferenceErrors) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorICRefErrorGetter);
LocalContext context(0, templ, v8::Handle<Value>());
@@ -11914,7 +12149,7 @@ static void InterceptorICExceptionGetter(
info.GetReturnValue().Set(call_ic_function3);
}
if (interceptor_ic_exception_get_count == 20) {
- v8::ThrowException(v8_num(42));
+ info.GetIsolate()->ThrowException(v8_num(42));
return;
}
}
@@ -11924,7 +12159,7 @@ static void InterceptorICExceptionGetter(
// exception once in a while.
THREADED_TEST(InterceptorICGetterExceptions) {
interceptor_ic_exception_get_count = 0;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorICExceptionGetter);
LocalContext context(0, templ, v8::Handle<Value>());
@@ -11959,7 +12194,7 @@ static void InterceptorICExceptionSetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
if (++interceptor_ic_exception_set_count > 20) {
- v8::ThrowException(v8_num(42));
+ info.GetIsolate()->ThrowException(v8_num(42));
}
}
@@ -11968,7 +12203,7 @@ static void InterceptorICExceptionSetter(
// once in a while.
THREADED_TEST(InterceptorICSetterExceptions) {
interceptor_ic_exception_set_count = 0;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(0, InterceptorICExceptionSetter);
LocalContext context(0, templ, v8::Handle<Value>());
@@ -11986,7 +12221,7 @@ THREADED_TEST(InterceptorICSetterExceptions) {
// Test that we ignore null interceptors.
THREADED_TEST(NullNamedInterceptor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(
static_cast<v8::NamedPropertyGetterCallback>(0));
@@ -12002,7 +12237,7 @@ THREADED_TEST(NullNamedInterceptor) {
// Test that we ignore null interceptors.
THREADED_TEST(NullIndexedInterceptor) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(
static_cast<v8::IndexedPropertyGetterCallback>(0));
@@ -12017,7 +12252,7 @@ THREADED_TEST(NullIndexedInterceptor) {
THREADED_TEST(NamedPropertyHandlerGetterAttributes) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
templ->InstanceTemplate()->SetNamedPropertyHandler(InterceptorLoadXICGetter);
LocalContext env;
@@ -12031,7 +12266,7 @@ THREADED_TEST(NamedPropertyHandlerGetterAttributes) {
static void ThrowingGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- ThrowException(Handle<Value>());
+ info.GetIsolate()->ThrowException(Handle<Value>());
info.GetReturnValue().SetUndefined();
}
@@ -12097,8 +12332,8 @@ static void ThrowingCallbackWithTryCatch(
try_catch.SetVerbose(true);
CompileRun("throw 'from JS';");
CHECK(try_catch.HasCaught());
- CHECK(!i::Isolate::Current()->has_pending_exception());
- CHECK(!i::Isolate::Current()->has_scheduled_exception());
+ CHECK(!CcTest::i_isolate()->has_pending_exception());
+ CHECK(!CcTest::i_isolate()->has_scheduled_exception());
}
@@ -12116,7 +12351,7 @@ static void ThrowFromJS(Handle<Message> message, Handle<Value> data) {
static void ThrowViaApi(Handle<Message> message, Handle<Value> data) {
- if (--call_depth) ThrowException(v8_str("ThrowViaApi"));
+ if (--call_depth) CcTest::isolate()->ThrowException(v8_str("ThrowViaApi"));
}
@@ -12237,7 +12472,7 @@ static void IsConstructHandler(
THREADED_TEST(IsConstructCall) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
// Function template with call handler.
Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
@@ -12254,7 +12489,7 @@ THREADED_TEST(IsConstructCall) {
THREADED_TEST(ObjectProtoToString) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
templ->SetClassName(v8_str("MyClass"));
@@ -12354,7 +12589,7 @@ void ApiTestFuzzer::Run() {
gate_.Wait();
{
// ... get the V8 lock and start running the test.
- v8::Locker locker(CcTest::default_isolate());
+ v8::Locker locker(CcTest::isolate());
CallTest();
}
// This test finished.
@@ -12418,7 +12653,7 @@ void ApiTestFuzzer::ContextSwitch() {
// If the new thread is the same as the current thread there is nothing to do.
if (NextThread()) {
// Now it can start.
- v8::Unlocker unlocker(CcTest::default_isolate());
+ v8::Unlocker unlocker(CcTest::isolate());
// Wait till someone starts us again.
gate_.Wait();
// And we're off.
@@ -12465,6 +12700,7 @@ TEST(Threading4) {
void ApiTestFuzzer::CallTest() {
+ v8::Isolate::Scope scope(CcTest::isolate());
if (kLogThreading)
printf("Start test %d\n", test_number_);
CallTestNumber(test_number_);
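With no default isolate left in the harness, each fuzzer thread has to make the test isolate current before running a test; the Locker alone is no longer enough. The entry pattern this hunk assumes:

    v8::Isolate* isolate = CcTest::isolate();
    v8::Locker locker(isolate);         // serialize access across threads
    v8::Isolate::Scope enter(isolate);  // make the isolate current on this thread
    v8::HandleScope scope(isolate);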
@@ -12474,13 +12710,14 @@ void ApiTestFuzzer::CallTest() {
static void ThrowInJS(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(v8::Locker::IsLocked(CcTest::default_isolate()));
+ v8::Isolate* isolate = args.GetIsolate();
+ CHECK(v8::Locker::IsLocked(isolate));
ApiTestFuzzer::Fuzz();
- v8::Unlocker unlocker(CcTest::default_isolate());
+ v8::Unlocker unlocker(isolate);
const char* code = "throw 7;";
{
- v8::Locker nested_locker(CcTest::default_isolate());
- v8::HandleScope scope(args.GetIsolate());
+ v8::Locker nested_locker(isolate);
+ v8::HandleScope scope(isolate);
v8::Handle<Value> exception;
{ v8::TryCatch try_catch;
v8::Handle<Value> value = CompileRun(code);
@@ -12489,20 +12726,20 @@ static void ThrowInJS(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Make sure to wrap the exception in a new handle because
// the handle returned from the TryCatch is destroyed
// when the TryCatch is destroyed.
- exception = Local<Value>::New(try_catch.Exception());
+ exception = Local<Value>::New(isolate, try_catch.Exception());
}
- v8::ThrowException(exception);
+ args.GetIsolate()->ThrowException(exception);
}
}
static void ThrowInJSNoCatch(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(v8::Locker::IsLocked(CcTest::default_isolate()));
+ CHECK(v8::Locker::IsLocked(CcTest::isolate()));
ApiTestFuzzer::Fuzz();
- v8::Unlocker unlocker(CcTest::default_isolate());
+ v8::Unlocker unlocker(CcTest::isolate());
const char* code = "throw 7;";
{
- v8::Locker nested_locker(CcTest::default_isolate());
+ v8::Locker nested_locker(CcTest::isolate());
v8::HandleScope scope(args.GetIsolate());
v8::Handle<Value> value = CompileRun(code);
CHECK(value.IsEmpty());
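Re-wrapping a handle now names its isolate explicitly: the one-argument Local<Value>::New(value) overload gives way to the two-argument form. Sketch of the exception-saving idiom from the hunk above:

    // Copy the exception out before the TryCatch (and its handle) dies.
    v8::Local<v8::Value> saved =
        v8::Local<v8::Value>::New(isolate, try_catch.Exception());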
@@ -12514,8 +12751,8 @@ static void ThrowInJSNoCatch(const v8::FunctionCallbackInfo<v8::Value>& args) {
// These are locking tests that don't need to be run again
// as part of the locking aggregation tests.
TEST(NestedLockers) {
- v8::Locker locker(CcTest::default_isolate());
- CHECK(v8::Locker::IsLocked(CcTest::default_isolate()));
+ v8::Locker locker(CcTest::isolate());
+ CHECK(v8::Locker::IsLocked(CcTest::isolate()));
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(ThrowInJS);
@@ -12536,7 +12773,7 @@ TEST(NestedLockers) {
// These are locking tests that don't need to be run again
// as part of the locking aggregation tests.
TEST(NestedLockersNoTryCatch) {
- v8::Locker locker(CcTest::default_isolate());
+ v8::Locker locker(CcTest::isolate());
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<v8::FunctionTemplate> fun_templ =
@@ -12556,24 +12793,24 @@ TEST(NestedLockersNoTryCatch) {
THREADED_TEST(RecursiveLocking) {
- v8::Locker locker(CcTest::default_isolate());
+ v8::Locker locker(CcTest::isolate());
{
- v8::Locker locker2(CcTest::default_isolate());
- CHECK(v8::Locker::IsLocked(CcTest::default_isolate()));
+ v8::Locker locker2(CcTest::isolate());
+ CHECK(v8::Locker::IsLocked(CcTest::isolate()));
}
}
static void UnlockForAMoment(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
- v8::Unlocker unlocker(CcTest::default_isolate());
+ v8::Unlocker unlocker(CcTest::isolate());
}
THREADED_TEST(LockUnlockLock) {
{
- v8::Locker locker(CcTest::default_isolate());
- v8::HandleScope scope(CcTest::default_isolate());
+ v8::Locker locker(CcTest::isolate());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(UnlockForAMoment);
@@ -12586,8 +12823,8 @@ THREADED_TEST(LockUnlockLock) {
CHECK_EQ(42, script->Run()->Int32Value());
}
{
- v8::Locker locker(CcTest::default_isolate());
- v8::HandleScope scope(CcTest::default_isolate());
+ v8::Locker locker(CcTest::isolate());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(UnlockForAMoment);
@@ -12603,9 +12840,9 @@ THREADED_TEST(LockUnlockLock) {
static int GetGlobalObjectsCount() {
- i::Isolate::Current()->heap()->EnsureHeapIsIterable();
+ CcTest::heap()->EnsureHeapIsIterable();
int count = 0;
- i::HeapIterator it(HEAP);
+ i::HeapIterator it(CcTest::heap());
for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
if (object->IsJSGlobalObject()) count++;
return count;
@@ -12618,11 +12855,11 @@ static void CheckSurvivingGlobalObjectsCount(int expected) {
// the first garbage collection but some of the maps have already
// been marked at that point. Therefore some of the maps are not
// collected until the second garbage collection.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
int count = GetGlobalObjectsCount();
#ifdef DEBUG
- if (count != expected) HEAP->TracePathToGlobal();
+ if (count != expected) CcTest::heap()->TracePathToGlobal();
#endif
CHECK_EQ(expected, count);
}
@@ -12634,27 +12871,27 @@ TEST(DontLeakGlobalObjects) {
v8::V8::Initialize();
for (int i = 0; i < 5; i++) {
- { v8::HandleScope scope(v8::Isolate::GetCurrent());
+ { v8::HandleScope scope(CcTest::isolate());
LocalContext context;
}
v8::V8::ContextDisposedNotification();
CheckSurvivingGlobalObjectsCount(0);
- { v8::HandleScope scope(v8::Isolate::GetCurrent());
+ { v8::HandleScope scope(CcTest::isolate());
LocalContext context;
v8_compile("Date")->Run();
}
v8::V8::ContextDisposedNotification();
CheckSurvivingGlobalObjectsCount(0);
- { v8::HandleScope scope(v8::Isolate::GetCurrent());
+ { v8::HandleScope scope(CcTest::isolate());
LocalContext context;
v8_compile("/aaa/")->Run();
}
v8::V8::ContextDisposedNotification();
CheckSurvivingGlobalObjectsCount(0);
- { v8::HandleScope scope(v8::Isolate::GetCurrent());
+ { v8::HandleScope scope(CcTest::isolate());
const char* extension_list[] = { "v8/gc" };
v8::ExtensionConfiguration extensions(1, extension_list);
LocalContext context(&extensions);
@@ -12665,17 +12902,6 @@ TEST(DontLeakGlobalObjects) {
}
}
-template<class T>
-struct CopyablePersistentTraits {
- typedef Persistent<T, CopyablePersistentTraits<T> > CopyablePersistent;
- static const bool kResetInDestructor = true;
- template<class S, class M>
- static V8_INLINE void Copy(const Persistent<S, M>& source,
- CopyablePersistent* dest) {
- // do nothing, just allow copy
- }
-};
-
TEST(CopyablePersistent) {
LocalContext context;
@@ -12683,19 +12909,20 @@ TEST(CopyablePersistent) {
i::GlobalHandles* globals =
reinterpret_cast<i::Isolate*>(isolate)->global_handles();
int initial_handles = globals->global_handles_count();
+ typedef v8::Persistent<v8::Object, v8::CopyablePersistentTraits<v8::Object> >
+ CopyableObject;
{
- v8::Persistent<v8::Object, CopyablePersistentTraits<v8::Object> > handle1;
+ CopyableObject handle1;
{
v8::HandleScope scope(isolate);
handle1.Reset(isolate, v8::Object::New());
}
CHECK_EQ(initial_handles + 1, globals->global_handles_count());
- v8::Persistent<v8::Object, CopyablePersistentTraits<v8::Object> > handle2;
+ CopyableObject handle2;
handle2 = handle1;
CHECK(handle1 == handle2);
CHECK_EQ(initial_handles + 2, globals->global_handles_count());
- v8::Persistent<v8::Object, CopyablePersistentTraits<v8::Object> >
- handle3(handle2);
+ CopyableObject handle3(handle2);
CHECK(handle1 == handle3);
CHECK_EQ(initial_handles + 3, globals->global_handles_count());
}
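The test's private CopyablePersistentTraits is gone because the API now ships an equivalent under v8::CopyablePersistentTraits; the typedef merely shortens the spelling. Usage sketch:

    typedef v8::Persistent<v8::Object,
                           v8::CopyablePersistentTraits<v8::Object> >
        CopyableObject;
    CopyableObject a;     // resets itself in its destructor
    CopyableObject b(a);  // copying is exactly what the traits permit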
@@ -12764,7 +12991,7 @@ THREADED_TEST(NewPersistentHandleFromWeakCallback) {
// weak callback of the first handle would be able to 'reallocate' it.
handle1.MakeWeak<v8::Value, void>(NULL, NewPersistentHandleCallback);
handle2.Dispose();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
@@ -12774,7 +13001,7 @@ void DisposeAndForceGcCallback(v8::Isolate* isolate,
v8::Persistent<v8::Value>* handle,
void*) {
to_be_disposed.Dispose();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
handle->Dispose();
}
@@ -12791,7 +13018,7 @@ THREADED_TEST(DoNotUseDeletedNodesInSecondLevelGc) {
}
handle1.MakeWeak<v8::Value, void>(NULL, DisposeAndForceGcCallback);
to_be_disposed.Reset(isolate, handle2);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
void DisposingCallback(v8::Isolate* isolate,
@@ -12822,7 +13049,7 @@ THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) {
}
handle2.MakeWeak<v8::Value, void>(NULL, DisposingCallback);
handle3.MakeWeak<v8::Value, void>(NULL, HandleCreatingCallback);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
@@ -12837,11 +13064,11 @@ THREADED_TEST(CheckForCrossContextObjectLiterals) {
for (int i = 0; i < nof; i++) {
const char* source = sources[i];
- { v8::HandleScope scope(v8::Isolate::GetCurrent());
+ { v8::HandleScope scope(CcTest::isolate());
LocalContext context;
CompileRun(source);
}
- { v8::HandleScope scope(v8::Isolate::GetCurrent());
+ { v8::HandleScope scope(CcTest::isolate());
LocalContext context;
CompileRun(source);
}
@@ -12860,7 +13087,7 @@ static v8::Handle<Value> NestedScope(v8::Local<Context> env) {
THREADED_TEST(NestedHandleScopeAndContexts) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer(isolate);
v8::Local<Context> env = Context::New(isolate);
env->Enter();
@@ -13354,7 +13581,7 @@ static void event_handler(const v8::JitCodeEvent* event) {
}
-TEST(SetJitCodeEventHandler) {
+UNINITIALIZED_TEST(SetJitCodeEventHandler) {
i::FLAG_stress_compaction = true;
i::FLAG_incremental_marking = false;
const char* script =
@@ -13371,6 +13598,7 @@ TEST(SetJitCodeEventHandler) {
// have remnants of state from other code.
v8::Isolate* isolate = v8::Isolate::New();
isolate->Enter();
+ i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
{
v8::HandleScope scope(isolate);
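Because the test is now UNINITIALIZED_TEST and builds its own isolate, it cannot use CcTest::heap(); it reaches the heap through the isolate it just created. Sketch of that cast, as used in this hunk:

    i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
    heap->CollectAllAvailableGarbage("TestSetJitCodeEventHandler");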
@@ -13389,9 +13617,9 @@ TEST(SetJitCodeEventHandler) {
// different fragmented code-space pages.
const int kIterations = 10;
for (int i = 0; i < kIterations; ++i) {
- LocalContext env;
+ LocalContext env(isolate);
i::AlwaysAllocateScope always_allocate;
- SimulateFullSpace(HEAP->code_space());
+ SimulateFullSpace(heap->code_space());
CompileRun(script);
// Keep a strong reference to the code object in the handle scope.
@@ -13405,7 +13633,7 @@ TEST(SetJitCodeEventHandler) {
}
// Force code movement.
- HEAP->CollectAllAvailableGarbage("TestSetJitCodeEventHandler");
+ heap->CollectAllAvailableGarbage("TestSetJitCodeEventHandler");
V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
@@ -13427,7 +13655,7 @@ TEST(SetJitCodeEventHandler) {
// request enumeration of existing code.
{
v8::HandleScope scope(isolate);
- LocalContext env;
+ LocalContext env(isolate);
CompileRun(script);
// Now get code through initial iteration.
@@ -13459,7 +13687,7 @@ static int64_t cast(intptr_t x) { return static_cast<int64_t>(x); }
THREADED_TEST(ExternalAllocatedMemory) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer(isolate);
v8::Local<Context> env(Context::New(isolate));
CHECK(!env.IsEmpty());
@@ -13472,28 +13700,6 @@ THREADED_TEST(ExternalAllocatedMemory) {
}
-THREADED_TEST(DisposeEnteredContext) {
- LocalContext outer;
- v8::Isolate* isolate = outer->GetIsolate();
- v8::Persistent<v8::Context> inner;
- {
- v8::HandleScope scope(isolate);
- inner.Reset(isolate, v8::Context::New(isolate));
- }
- v8::HandleScope scope(isolate);
- {
- // Don't want a handle here, so do this unsafely
- v8::Handle<v8::Context> inner_local =
- v8::Utils::Convert<i::Object, v8::Context>(
- v8::Utils::OpenPersistent(inner));
- inner_local->Enter();
- inner.Dispose();
- inner.Clear();
- inner_local->Exit();
- }
-}
-
-
// Regression test for issue 54, object templates with internal fields
// but no accessors or interceptors did not get their internal field
// count set on instances.
@@ -13534,7 +13740,7 @@ TEST(CatchStackOverflow) {
static void CheckTryCatchSourceInfo(v8::Handle<v8::Script> script,
const char* resource_name,
int line_offset) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::TryCatch try_catch;
v8::Handle<v8::Value> result = script->Run();
CHECK(result.IsEmpty());
@@ -13738,11 +13944,12 @@ static bool IndexedSetAccessBlocker(Local<v8::Object> obj,
THREADED_TEST(DisableAccessChecksWhileConfiguring) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetAccessCheckCallbacks(NamedSetAccessBlocker,
IndexedSetAccessBlocker);
- templ->Set(v8_str("x"), v8::True());
+ templ->Set(v8_str("x"), v8::True(isolate));
Local<v8::Object> instance = templ->NewInstance();
context->Global()->Set(v8_str("obj"), instance);
Local<Value> value = CompileRun("obj.x");
@@ -13807,7 +14014,7 @@ THREADED_TEST(AccessChecksReenabledCorrectly) {
// This tests that access check information remains on the global
// object template when creating contexts.
THREADED_TEST(AccessControlRepeatedContextCreation) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->SetAccessCheckCallbacks(NamedSetAccessBlocker,
@@ -13825,7 +14032,7 @@ THREADED_TEST(AccessControlRepeatedContextCreation) {
THREADED_TEST(TurnOnAccessCheck) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
// Create an environment with access check to the global object disabled by
@@ -13906,7 +14113,7 @@ static bool NamedGetAccessBlockAandH(Local<v8::Object> obj,
THREADED_TEST(TurnOnAccessCheckAndRecompile) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
// Create an environment with access check to the global object disabled by
@@ -13999,9 +14206,10 @@ TEST(PreCompile) {
// TODO(155): This test would break without the initialization of V8. This is
// a workaround for now to make this test not fail.
v8::V8::Initialize();
+ v8::Isolate* isolate = CcTest::isolate();
const char* script = "function foo(a) { return a+1; }";
v8::ScriptData* sd =
- v8::ScriptData::PreCompile(script, i::StrLength(script));
+ v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
CHECK_NE(sd->Length(), 0);
CHECK_NE(sd->Data(), NULL);
CHECK(!sd->HasError());
@@ -14011,9 +14219,10 @@ TEST(PreCompile) {
TEST(PreCompileWithError) {
v8::V8::Initialize();
+ v8::Isolate* isolate = CcTest::isolate();
const char* script = "function foo(a) { return 1 * * 2; }";
v8::ScriptData* sd =
- v8::ScriptData::PreCompile(script, i::StrLength(script));
+ v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
CHECK(sd->HasError());
delete sd;
}
@@ -14021,9 +14230,10 @@ TEST(PreCompileWithError) {
TEST(Regress31661) {
v8::V8::Initialize();
+ v8::Isolate* isolate = CcTest::isolate();
const char* script = " The Definintive Guide";
v8::ScriptData* sd =
- v8::ScriptData::PreCompile(script, i::StrLength(script));
+ v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
CHECK(sd->HasError());
delete sd;
}
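ScriptData::PreCompile gains a leading isolate parameter in this API version; the preparse data itself is unchanged. The updated call shape, sketched with the same trivial script the tests use:

    v8::Isolate* isolate = CcTest::isolate();
    const char* script = "function foo(a) { return a+1; }";
    v8::ScriptData* sd =
        v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
    delete sd;  // the caller owns the preparse data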
@@ -14032,9 +14242,10 @@ TEST(Regress31661) {
// Tests that ScriptData can be serialized and deserialized.
TEST(PreCompileSerialization) {
v8::V8::Initialize();
+ v8::Isolate* isolate = CcTest::isolate();
const char* script = "function foo(a) { return a+1; }";
v8::ScriptData* sd =
- v8::ScriptData::PreCompile(script, i::StrLength(script));
+ v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
// Serialize.
int serialized_data_length = sd->Length();
@@ -14071,13 +14282,14 @@ TEST(PreCompileDeserializationError) {
// Attempts to deserialize bad data.
TEST(PreCompileInvalidPreparseDataError) {
v8::V8::Initialize();
+ v8::Isolate* isolate = CcTest::isolate();
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
const char* script = "function foo(){ return 5;}\n"
"function bar(){ return 6 + 7;} foo();";
v8::ScriptData* sd =
- v8::ScriptData::PreCompile(script, i::StrLength(script));
+ v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
CHECK(!sd->HasError());
// ScriptDataImpl private implementation details
const int kHeaderSize = i::PreparseDataConstants::kHeaderSize;
@@ -14103,7 +14315,7 @@ TEST(PreCompileInvalidPreparseDataError) {
// Overwrite function bar's start position with 200. The function entry
// will not be found when searching for it by position and we should fall
// back on eager compilation.
- sd = v8::ScriptData::PreCompile(script, i::StrLength(script));
+ sd = v8::ScriptData::PreCompile(isolate, script, i::StrLength(script));
sd_data = reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryStartOffset] =
200;
@@ -14118,12 +14330,13 @@ TEST(PreCompileInvalidPreparseDataError) {
// the same results (at least for one trivial case).
TEST(PreCompileAPIVariationsAreSame) {
v8::V8::Initialize();
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
const char* cstring = "function foo(a) { return a+1; }";
v8::ScriptData* sd_from_cstring =
- v8::ScriptData::PreCompile(cstring, i::StrLength(cstring));
+ v8::ScriptData::PreCompile(isolate, cstring, i::StrLength(cstring));
TestAsciiResource* resource = new TestAsciiResource(cstring);
v8::ScriptData* sd_from_external_string = v8::ScriptData::PreCompile(
@@ -14155,18 +14368,18 @@ TEST(PreCompileAPIVariationsAreSame) {
// arise because we share code between contexts via the compilation
// cache.
THREADED_TEST(DictionaryICLoadedFunction) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
// Test LoadIC.
for (int i = 0; i < 2; i++) {
LocalContext context;
- context->Global()->Set(v8_str("tmp"), v8::True());
+ context->Global()->Set(v8_str("tmp"), v8::True(CcTest::isolate()));
context->Global()->Delete(v8_str("tmp"));
CompileRun("for (var j = 0; j < 10; j++) new RegExp('');");
}
// Test CallIC.
for (int i = 0; i < 2; i++) {
LocalContext context;
- context->Global()->Set(v8_str("tmp"), v8::True());
+ context->Global()->Set(v8_str("tmp"), v8::True(CcTest::isolate()));
context->Global()->Delete(v8_str("tmp"));
CompileRun("for (var j = 0; j < 10; j++) RegExp('')");
}
@@ -14176,7 +14389,7 @@ THREADED_TEST(DictionaryICLoadedFunction) {
// Test that cross-context new calls use the context of the callee to
// create the new JavaScript object.
THREADED_TEST(CrossContextNew) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<Context> context0 = Context::New(isolate);
v8::Local<Context> context1 = Context::New(isolate);
@@ -14203,128 +14416,6 @@ THREADED_TEST(CrossContextNew) {
}
-class RegExpInterruptTest {
- public:
- RegExpInterruptTest() : block_(0) {}
- ~RegExpInterruptTest() {}
- void RunTest() {
- gc_count_ = 0;
- gc_during_regexp_ = 0;
- regexp_success_ = false;
- gc_success_ = false;
- GCThread gc_thread(this);
- gc_thread.Start();
- v8::Locker::StartPreemption(1);
-
- LongRunningRegExp();
- {
- v8::Unlocker unlock(CcTest::default_isolate());
- gc_thread.Join();
- }
- v8::Locker::StopPreemption();
- CHECK(regexp_success_);
- CHECK(gc_success_);
- }
-
- private:
- // Number of garbage collections required.
- static const int kRequiredGCs = 5;
-
- class GCThread : public i::Thread {
- public:
- explicit GCThread(RegExpInterruptTest* test)
- : Thread("GCThread"), test_(test) {}
- virtual void Run() {
- test_->CollectGarbage();
- }
- private:
- RegExpInterruptTest* test_;
- };
-
- void CollectGarbage() {
- block_.Wait();
- while (gc_during_regexp_ < kRequiredGCs) {
- {
- v8::Locker lock(CcTest::default_isolate());
- // TODO(lrn): Perhaps create some garbage before collecting.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- gc_count_++;
- }
- i::OS::Sleep(1);
- }
- gc_success_ = true;
- }
-
- void LongRunningRegExp() {
- block_.Signal(); // Enable garbage collection thread on next preemption.
- int rounds = 0;
- while (gc_during_regexp_ < kRequiredGCs) {
- int gc_before = gc_count_;
- {
- // Match 15-30 "a"'s against 14 and a "b".
- const char* c_source =
- "/a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaa/"
- ".exec('aaaaaaaaaaaaaaab') === null";
- Local<String> source = String::New(c_source);
- Local<Script> script = Script::Compile(source);
- Local<Value> result = script->Run();
- if (!result->BooleanValue()) {
- gc_during_regexp_ = kRequiredGCs; // Allow gc thread to exit.
- return;
- }
- }
- {
- // Match 15-30 "a"'s against 15 and a "b".
- const char* c_source =
- "/a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaa/"
- ".exec('aaaaaaaaaaaaaaaab')[0] === 'aaaaaaaaaaaaaaaa'";
- Local<String> source = String::New(c_source);
- Local<Script> script = Script::Compile(source);
- Local<Value> result = script->Run();
- if (!result->BooleanValue()) {
- gc_during_regexp_ = kRequiredGCs;
- return;
- }
- }
- int gc_after = gc_count_;
- gc_during_regexp_ += gc_after - gc_before;
- rounds++;
- i::OS::Sleep(1);
- }
- regexp_success_ = true;
- }
-
- i::Semaphore block_;
- int gc_count_;
- int gc_during_regexp_;
- bool regexp_success_;
- bool gc_success_;
-};
-
-
-// Test that a regular expression execution can be interrupted and
-// survive a garbage collection.
-TEST(RegExpInterruption) {
- v8::Locker lock(CcTest::default_isolate());
- v8::V8::Initialize();
- v8::HandleScope scope(CcTest::default_isolate());
- Local<Context> local_env;
- {
- LocalContext env;
- local_env = env.local();
- }
-
- // Local context should still be live.
- CHECK(!local_env.IsEmpty());
- local_env->Enter();
-
- // Should complete without problems.
- RegExpInterruptTest().RunTest();
-
- local_env->Exit();
-}
-
-
class ApplyInterruptTest {
public:
ApplyInterruptTest() : block_(0) {}
@@ -14336,14 +14427,15 @@ class ApplyInterruptTest {
gc_success_ = false;
GCThread gc_thread(this);
gc_thread.Start();
- v8::Locker::StartPreemption(1);
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Locker::StartPreemption(isolate, 1);
LongRunningApply();
{
- v8::Unlocker unlock(CcTest::default_isolate());
+ v8::Unlocker unlock(isolate);
gc_thread.Join();
}
- v8::Locker::StopPreemption();
+ v8::Locker::StopPreemption(isolate);
CHECK(apply_success_);
CHECK(gc_success_);
}
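Preemption control is per-isolate now as well, so StartPreemption/StopPreemption take the isolate they act on. Sketch of the bracketing used by this test:

    v8::Isolate* isolate = CcTest::isolate();
    v8::Locker::StartPreemption(isolate, 1);  // preempt roughly every 1 ms
    // ... run the long JavaScript workload ...
    v8::Locker::StopPreemption(isolate);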
@@ -14367,8 +14459,9 @@ class ApplyInterruptTest {
block_.Wait();
while (gc_during_apply_ < kRequiredGCs) {
{
- v8::Locker lock(CcTest::default_isolate());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ v8::Locker lock(CcTest::isolate());
+ v8::Isolate::Scope isolate_scope(CcTest::isolate());
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
gc_count_++;
}
i::OS::Sleep(1);
@@ -14413,9 +14506,9 @@ class ApplyInterruptTest {
// Test that nothing bad happens if we get a preemption just when we were
// about to do an apply().
TEST(ApplyInterruption) {
- v8::Locker lock(CcTest::default_isolate());
+ v8::Locker lock(CcTest::isolate());
v8::V8::Initialize();
- v8::HandleScope scope(CcTest::default_isolate());
+ v8::HandleScope scope(CcTest::isolate());
Local<Context> local_env;
{
LocalContext env;
@@ -14497,17 +14590,17 @@ static void MorphAString(i::String* string,
CHECK(i::StringShape(string).IsExternal());
if (string->IsOneByteRepresentation()) {
// Check old map is not internalized or long.
- CHECK(string->map() == HEAP->external_ascii_string_map());
+ CHECK(string->map() == CcTest::heap()->external_ascii_string_map());
// Morph external string to be TwoByte string.
- string->set_map(HEAP->external_string_map());
+ string->set_map(CcTest::heap()->external_string_map());
i::ExternalTwoByteString* morphed =
i::ExternalTwoByteString::cast(string);
morphed->set_resource(uc16_resource);
} else {
// Check old map is not internalized or long.
- CHECK(string->map() == HEAP->external_string_map());
+ CHECK(string->map() == CcTest::heap()->external_string_map());
// Morph external string to be ASCII string.
- string->set_map(HEAP->external_ascii_string_map());
+ string->set_map(CcTest::heap()->external_ascii_string_map());
i::ExternalAsciiString* morphed =
i::ExternalAsciiString::cast(string);
morphed->set_resource(ascii_resource);
@@ -14524,7 +14617,7 @@ THREADED_TEST(MorphCompositeStringTest) {
uint16_t* two_byte_string = AsciiToTwoByteString(c_string);
{
LocalContext env;
- i::Factory* factory = i::Isolate::Current()->factory();
+ i::Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(env->GetIsolate());
AsciiVectorResource ascii_resource(
i::Vector<const char>(c_string, i::StrLength(c_string)));
@@ -14612,148 +14705,85 @@ TEST(CompileExternalTwoByteSource) {
}
-class RegExpStringModificationTest {
- public:
- RegExpStringModificationTest()
- : block_(0),
- morphs_(0),
- morphs_during_regexp_(0),
- ascii_resource_(i::Vector<const char>("aaaaaaaaaaaaaab", 15)),
- uc16_resource_(i::Vector<const uint16_t>(two_byte_content_, 15)) {}
- ~RegExpStringModificationTest() {}
- void RunTest() {
- i::Factory* factory = i::Isolate::Current()->factory();
+#ifndef V8_INTERPRETED_REGEXP
- regexp_success_ = false;
- morph_success_ = false;
+struct RegExpInterruptionData {
+ int loop_count;
+ UC16VectorResource* string_resource;
+ v8::Persistent<v8::String> string;
+} regexp_interruption_data;
- // Initialize the contents of two_byte_content_ to be a uc16 representation
- // of "aaaaaaaaaaaaaab".
- for (int i = 0; i < 14; i++) {
- two_byte_content_[i] = 'a';
- }
- two_byte_content_[14] = 'b';
-
- // Create the input string for the regexp - the one we are going to change
- // properties of.
- input_ = factory->NewExternalStringFromAscii(&ascii_resource_);
-
- // Inject the input as a global variable.
- i::Handle<i::String> input_name =
- factory->NewStringFromAscii(i::Vector<const char>("input", 5));
- i::Isolate::Current()->native_context()->global_object()->SetProperty(
- *input_name,
- *input_,
- NONE,
- i::kNonStrictMode)->ToObjectChecked();
-
- MorphThread morph_thread(this);
- morph_thread.Start();
- v8::Locker::StartPreemption(1);
- LongRunningRegExp();
- {
- v8::Unlocker unlock(CcTest::default_isolate());
- morph_thread.Join();
+
+class RegExpInterruptionThread : public i::Thread {
+ public:
+ explicit RegExpInterruptionThread(v8::Isolate* isolate)
+ : Thread("TimeoutThread"), isolate_(isolate) {}
+
+ virtual void Run() {
+ for (regexp_interruption_data.loop_count = 0;
+ regexp_interruption_data.loop_count < 7;
+ regexp_interruption_data.loop_count++) {
+ i::OS::Sleep(50); // Wait a bit before requesting GC.
+ reinterpret_cast<i::Isolate*>(isolate_)->stack_guard()->RequestGC();
}
- v8::Locker::StopPreemption();
- CHECK(regexp_success_);
- CHECK(morph_success_);
+ i::OS::Sleep(50); // Wait a bit before terminating.
+ v8::V8::TerminateExecution(isolate_);
}
private:
- // Number of string modifications required.
- static const int kRequiredModifications = 5;
- static const int kMaxModifications = 100;
+ v8::Isolate* isolate_;
+};
- class MorphThread : public i::Thread {
- public:
- explicit MorphThread(RegExpStringModificationTest* test)
- : Thread("MorphThread"), test_(test) {}
- virtual void Run() {
- test_->MorphString();
- }
- private:
- RegExpStringModificationTest* test_;
- };
- void MorphString() {
- block_.Wait();
- while (morphs_during_regexp_ < kRequiredModifications &&
- morphs_ < kMaxModifications) {
- {
- v8::Locker lock(CcTest::default_isolate());
- // Swap string between ascii and two-byte representation.
- i::String* string = *input_;
- MorphAString(string, &ascii_resource_, &uc16_resource_);
- morphs_++;
- }
- i::OS::Sleep(1);
- }
- morph_success_ = true;
- }
+void RunBeforeGC(v8::GCType type, v8::GCCallbackFlags flags) {
+ if (regexp_interruption_data.loop_count != 2) return;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::String> string = v8::Local<v8::String>::New(
+ CcTest::isolate(), regexp_interruption_data.string);
+ string->MakeExternal(regexp_interruption_data.string_resource);
+}
- void LongRunningRegExp() {
- block_.Signal(); // Enable morphing thread on next preemption.
- while (morphs_during_regexp_ < kRequiredModifications &&
- morphs_ < kMaxModifications) {
- int morphs_before = morphs_;
- {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- // Match 15-30 "a"'s against 14 and a "b".
- const char* c_source =
- "/a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaa/"
- ".exec(input) === null";
- Local<String> source = String::New(c_source);
- Local<Script> script = Script::Compile(source);
- Local<Value> result = script->Run();
- CHECK(result->IsTrue());
- }
- int morphs_after = morphs_;
- morphs_during_regexp_ += morphs_after - morphs_before;
- }
- regexp_success_ = true;
- }
- i::uc16 two_byte_content_[15];
- i::Semaphore block_;
- int morphs_;
- int morphs_during_regexp_;
- bool regexp_success_;
- bool morph_success_;
- i::Handle<i::String> input_;
- AsciiVectorResource ascii_resource_;
- UC16VectorResource uc16_resource_;
-};
+// Test that RegExp execution can be interrupted. Specifically, we test:
+// * interrupting with GC
+// * turning the subject string from one-byte internal to two-byte external
+// * forcing termination
+TEST(RegExpInterruption) {
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ RegExpInterruptionThread timeout_thread(CcTest::isolate());
-// Test that a regular expression execution can be interrupted and
-// the string changed without failing.
-TEST(RegExpStringModification) {
- v8::Locker lock(CcTest::default_isolate());
- v8::V8::Initialize();
- v8::HandleScope scope(CcTest::default_isolate());
- Local<Context> local_env;
- {
- LocalContext env;
- local_env = env.local();
- }
+ v8::V8::AddGCPrologueCallback(RunBeforeGC);
+ static const char* ascii_content = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+ i::uc16* uc16_content = AsciiToTwoByteString(ascii_content);
+ v8::Local<v8::String> string = v8_str(ascii_content);
- // Local context should still be live.
- CHECK(!local_env.IsEmpty());
- local_env->Enter();
+ CcTest::global()->Set(v8_str("a"), string);
+ regexp_interruption_data.string.Reset(CcTest::isolate(), string);
+ regexp_interruption_data.string_resource = new UC16VectorResource(
+ i::Vector<const i::uc16>(uc16_content, i::StrLength(ascii_content)));
- // Should complete without problems.
- RegExpStringModificationTest().RunTest();
+ v8::TryCatch try_catch;
+ timeout_thread.Start();
- local_env->Exit();
+ CompileRun("/((a*)*)*b/.exec(a)");
+ CHECK(try_catch.HasTerminated());
+
+ timeout_thread.Join();
+
+ delete regexp_interruption_data.string_resource;
+ regexp_interruption_data.string.Dispose();
}
+#endif // V8_INTERPRETED_REGEXP
+
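// Illustrative sketch (not part of this patch): the termination contract the
// test above leans on, under this V8 revision. TerminateExecution() posts an
// interrupt; the next stretch of JS execution unwinds with a special
// termination exception that TryCatch reports via HasTerminated(). The
// isolate is assumed to be live and entered, with a current context.
static void TerminationSketch(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::TryCatch try_catch;
  v8::V8::TerminateExecution(isolate);  // flag a pending termination
  CompileRun("for (;;) {}");            // bails out instead of spinning
  CHECK(try_catch.HasTerminated());     // termination, not an ordinary throw
}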
// Test that we cannot set a property on the global object if there
// is a read-only property in the prototype chain.
TEST(ReadOnlyPropertyInGlobalProto) {
i::FLAG_es5_readonly = true;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
LocalContext context(0, templ);
v8::Handle<v8::Object> global = context->Global();
@@ -14806,7 +14836,7 @@ TEST(ForceSet) {
force_set_set_count = 0;
pass_on_get = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
v8::Handle<v8::String> access_property = v8::String::New("a");
templ->SetAccessor(access_property, ForceSetGetter, ForceSetSetter);
@@ -14848,7 +14878,7 @@ TEST(ForceSetWithInterceptor) {
force_set_set_count = 0;
pass_on_get = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
templ->SetNamedPropertyHandler(ForceSetGetter, ForceSetInterceptSetter);
LocalContext context(NULL, templ);
@@ -14891,7 +14921,7 @@ TEST(ForceSetWithInterceptor) {
THREADED_TEST(ForceDelete) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
LocalContext context(NULL, templ);
v8::Handle<v8::Object> global = context->Global();
@@ -14926,7 +14956,7 @@ THREADED_TEST(ForceDeleteWithInterceptor) {
force_delete_interceptor_count = 0;
pass_on_delete = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
templ->SetNamedPropertyHandler(0, 0, 0, ForceDeleteDeleter);
LocalContext context(NULL, templ);
@@ -14979,14 +15009,14 @@ THREADED_TEST(ForceDeleteIC) {
TEST(InlinedFunctionAcrossContexts) {
i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer_scope(isolate);
v8::Local<v8::Context> ctx1 = v8::Context::New(isolate);
v8::Local<v8::Context> ctx2 = v8::Context::New(isolate);
ctx1->Enter();
{
- v8::HandleScope inner_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope inner_scope(CcTest::isolate());
CompileRun("var G = 42; function foo() { return G; }");
v8::Local<v8::Value> foo = ctx1->Global()->Get(v8_str("foo"));
ctx2->Enter();
@@ -15027,16 +15057,15 @@ static v8::Local<Context> calling_context2;
static void GetCallingContextCallback(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
- CHECK(Context::GetCurrent() == calling_context0);
CHECK(args.GetIsolate()->GetCurrentContext() == calling_context0);
- CHECK(Context::GetCalling() == calling_context1);
- CHECK(Context::GetEntered() == calling_context2);
+ CHECK(args.GetIsolate()->GetCallingContext() == calling_context1);
+ CHECK(args.GetIsolate()->GetEnteredContext() == calling_context2);
args.GetReturnValue().Set(42);
}
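// Illustrative sketch (not part of this patch): the same migration in
// isolation. The removed static accessors (Context::GetCurrent and friends)
// become queries on the isolate that owns the callback.
static void MigratedContextQueries(
    const v8::FunctionCallbackInfo<v8::Value>& args) {
  v8::Isolate* isolate = args.GetIsolate();
  v8::Local<v8::Context> current = isolate->GetCurrentContext();
  v8::Local<v8::Context> calling = isolate->GetCallingContext();
  v8::Local<v8::Context> entered = isolate->GetEnteredContext();
  // All three are now explicit about which isolate they consult.
  args.GetReturnValue().Set(!current.IsEmpty() && !calling.IsEmpty() &&
                            !entered.IsEmpty());
}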
THREADED_TEST(GetCurrentContextWhenNotInContext) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
CHECK(isolate != NULL);
CHECK(isolate->context() == NULL);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
@@ -15048,7 +15077,7 @@ THREADED_TEST(GetCurrentContextWhenNotInContext) {
THREADED_TEST(GetCallingContext) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Local<Context> calling_context0(Context::New(isolate));
@@ -15153,7 +15182,7 @@ static void CheckElementValue(i::Isolate* isolate,
THREADED_TEST(PixelArray) {
LocalContext context;
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
v8::HandleScope scope(context->GetIsolate());
const int kElementCount = 260;
@@ -15164,12 +15193,12 @@ THREADED_TEST(PixelArray) {
v8::kExternalPixelArray,
pixel_data));
// Force GC to trigger verification.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
for (int i = 0; i < kElementCount; i++) {
pixels->set(i, i % 256);
}
// Force GC to trigger verification.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
for (int i = 0; i < kElementCount; i++) {
CHECK_EQ(i % 256, pixels->get_scalar(i));
CHECK_EQ(i % 256, pixel_data[i]);
@@ -15567,7 +15596,7 @@ static void NotHandledIndexedPropertySetter(
THREADED_TEST(PixelArrayWithInterceptor) {
LocalContext context;
- i::Factory* factory = i::Isolate::Current()->factory();
+ i::Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(context->GetIsolate());
const int kElementCount = 260;
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
@@ -15740,7 +15769,7 @@ static void ObjectWithExternalArrayTestHelper(
"}"
"sum;");
// Force GC to trigger verification.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(28, result->Int32Value());
// Make sure out-of-range loads do not throw.
@@ -15933,7 +15962,7 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
int64_t low,
int64_t high) {
LocalContext context;
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
v8::HandleScope scope(context->GetIsolate());
const int kElementCount = 40;
@@ -15944,12 +15973,12 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
i::Handle<ExternalArrayClass>::cast(
factory->NewExternalArray(kElementCount, array_type, array_data));
// Force GC to trigger verification.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
for (int i = 0; i < kElementCount; i++) {
array->set(i, static_cast<ElementType>(i));
}
// Force GC to trigger verification.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
for (int i = 0; i < kElementCount; i++) {
CHECK_EQ(static_cast<int64_t>(i),
static_cast<int64_t>(array->get_scalar(i)));
@@ -16460,7 +16489,7 @@ void checkStackFrame(const char* expected_script_name,
const char* expected_func_name, int expected_line_number,
int expected_column, bool is_eval, bool is_constructor,
v8::Handle<v8::StackFrame> frame) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::String::Utf8Value func_name(frame->GetFunctionName());
v8::String::Utf8Value script_name(frame->GetScriptName());
if (*script_name == NULL) {
@@ -16532,7 +16561,7 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
// TODO(3074796): Reenable this as a THREADED_TEST once it passes.
// THREADED_TEST(CaptureStackTrace) {
TEST(CaptureStackTrace) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::String> origin = v8::String::New("capture-stack-trace-test");
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("AnalyzeStackInNativeCode"),
@@ -16781,7 +16810,7 @@ void AnalyzeStackOfEvalWithSourceURL(
TEST(SourceURLInStackTrace) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("AnalyzeStackOfEvalWithSourceURL"),
v8::FunctionTemplate::New(AnalyzeStackOfEvalWithSourceURL));
@@ -16823,7 +16852,7 @@ void AnalyzeScriptIdInStack(
TEST(ScriptIdInStackTrace) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("AnalyzeScriptIdInStack"),
v8::FunctionTemplate::New(AnalyzeScriptIdInStack));
@@ -16861,7 +16890,7 @@ void AnalyzeStackOfInlineScriptWithSourceURL(
TEST(InlineScriptWithSourceURLInStackTrace) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("AnalyzeStackOfInlineScriptWithSourceURL"),
v8::FunctionTemplate::New(
@@ -16906,7 +16935,7 @@ void AnalyzeStackOfDynamicScriptWithSourceURL(
TEST(DynamicWithSourceURLInStackTrace) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->Set(v8_str("AnalyzeStackOfDynamicScriptWithSourceURL"),
v8::FunctionTemplate::New(
@@ -16935,8 +16964,8 @@ TEST(DynamicWithSourceURLInStackTrace) {
static void CreateGarbageInOldSpace() {
- i::Factory* factory = i::Isolate::Current()->factory();
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ i::Factory* factory = CcTest::i_isolate()->factory();
+ v8::HandleScope scope(CcTest::isolate());
i::AlwaysAllocateScope always_allocate;
for (int i = 0; i < 1000; i++) {
factory->NewFixedArray(1000, i::TENURED);
@@ -16949,15 +16978,15 @@ TEST(IdleNotification) {
const intptr_t MB = 1024 * 1024;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- intptr_t initial_size = HEAP->SizeOfObjects();
+ intptr_t initial_size = CcTest::heap()->SizeOfObjects();
CreateGarbageInOldSpace();
- intptr_t size_with_garbage = HEAP->SizeOfObjects();
+ intptr_t size_with_garbage = CcTest::heap()->SizeOfObjects();
CHECK_GT(size_with_garbage, initial_size + MB);
bool finished = false;
for (int i = 0; i < 200 && !finished; i++) {
finished = v8::V8::IdleNotification();
}
- intptr_t final_size = HEAP->SizeOfObjects();
+ intptr_t final_size = CcTest::heap()->SizeOfObjects();
CHECK(finished);
CHECK_LT(final_size, initial_size + 1);
}
@@ -16969,15 +16998,15 @@ TEST(IdleNotificationWithSmallHint) {
const int IdlePauseInMs = 900;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- intptr_t initial_size = HEAP->SizeOfObjects();
+ intptr_t initial_size = CcTest::heap()->SizeOfObjects();
CreateGarbageInOldSpace();
- intptr_t size_with_garbage = HEAP->SizeOfObjects();
+ intptr_t size_with_garbage = CcTest::heap()->SizeOfObjects();
CHECK_GT(size_with_garbage, initial_size + MB);
bool finished = false;
for (int i = 0; i < 200 && !finished; i++) {
finished = v8::V8::IdleNotification(IdlePauseInMs);
}
- intptr_t final_size = HEAP->SizeOfObjects();
+ intptr_t final_size = CcTest::heap()->SizeOfObjects();
CHECK(finished);
CHECK_LT(final_size, initial_size + 1);
}
@@ -16989,15 +17018,15 @@ TEST(IdleNotificationWithLargeHint) {
const int IdlePauseInMs = 900;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- intptr_t initial_size = HEAP->SizeOfObjects();
+ intptr_t initial_size = CcTest::heap()->SizeOfObjects();
CreateGarbageInOldSpace();
- intptr_t size_with_garbage = HEAP->SizeOfObjects();
+ intptr_t size_with_garbage = CcTest::heap()->SizeOfObjects();
CHECK_GT(size_with_garbage, initial_size + MB);
bool finished = false;
for (int i = 0; i < 200 && !finished; i++) {
finished = v8::V8::IdleNotification(IdlePauseInMs);
}
- intptr_t final_size = HEAP->SizeOfObjects();
+ intptr_t final_size = CcTest::heap()->SizeOfObjects();
CHECK(finished);
CHECK_LT(final_size, initial_size + 1);
}
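// Illustrative sketch (not part of this patch): the shared shape of the three
// IdleNotification tests. The embedder keeps notifying from its idle handler
// until V8 reports collection has caught up; the 200-round cap mirrors the
// tests' safety bound.
static bool DriveIdleCollection(int idle_pause_in_ms) {
  bool finished = false;
  for (int i = 0; i < 200 && !finished; i++) {
    finished = v8::V8::IdleNotification(idle_pause_in_ms);
  }
  return finished;  // true once V8 considers itself done collecting
}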
@@ -17010,7 +17039,7 @@ TEST(Regress2107) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(env->GetIsolate());
- intptr_t initial_size = HEAP->SizeOfObjects();
+ intptr_t initial_size = CcTest::heap()->SizeOfObjects();
// Send idle notification to start a round of incremental GCs.
v8::V8::IdleNotification(kShortIdlePauseInMs);
// Emulate 7 page reloads.
@@ -17027,13 +17056,13 @@ TEST(Regress2107) {
}
// Create garbage and check that idle notification still collects it.
CreateGarbageInOldSpace();
- intptr_t size_with_garbage = HEAP->SizeOfObjects();
+ intptr_t size_with_garbage = CcTest::heap()->SizeOfObjects();
CHECK_GT(size_with_garbage, initial_size + MB);
bool finished = false;
for (int i = 0; i < 200 && !finished; i++) {
finished = v8::V8::IdleNotification(kShortIdlePauseInMs);
}
- intptr_t final_size = HEAP->SizeOfObjects();
+ intptr_t final_size = CcTest::heap()->SizeOfObjects();
CHECK_LT(final_size, initial_size + 1);
}
@@ -17042,7 +17071,7 @@ static uint32_t* stack_limit;
static void GetStackLimitCallback(
const v8::FunctionCallbackInfo<v8::Value>& args) {
stack_limit = reinterpret_cast<uint32_t*>(
- i::Isolate::Current()->stack_guard()->real_climit());
+ CcTest::i_isolate()->stack_guard()->real_climit());
}
@@ -17088,7 +17117,7 @@ TEST(SetResourceConstraints) {
TEST(SetResourceConstraintsInThread) {
uint32_t* set_limit;
{
- v8::Locker locker(CcTest::default_isolate());
+ v8::Locker locker(CcTest::isolate());
set_limit = ComputeStackLimit(stack_breathing_room);
// Set stack limit.
@@ -17097,7 +17126,7 @@ TEST(SetResourceConstraintsInThread) {
CHECK(v8::SetResourceConstraints(&constraints));
// Execute a script.
- v8::HandleScope scope(CcTest::default_isolate());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(GetStackLimitCallback);
@@ -17108,7 +17137,7 @@ TEST(SetResourceConstraintsInThread) {
CHECK(stack_limit == set_limit);
}
{
- v8::Locker locker(CcTest::default_isolate());
+ v8::Locker locker(CcTest::isolate());
CHECK(stack_limit == set_limit);
}
}
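// Illustrative sketch (not part of this patch): the stack-limit plumbing the
// test above exercises. The limit must be an address within the current
// thread's stack, with breathing room left below the real OS limit;
// ComputeStackLimit stands in for the test file's helper of that name.
static void ApplyStackLimit(uint32_t breathing_room) {
  uint32_t* limit = ComputeStackLimit(breathing_room);
  v8::ResourceConstraints constraints;
  constraints.set_stack_limit(limit);
  CHECK(v8::SetResourceConstraints(&constraints));
}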
@@ -17181,10 +17210,10 @@ TEST(VisitExternalStrings) {
// Symbolized External.
resource[3] = new TestResource(AsciiToTwoByteString("Some other string"));
v8::Local<v8::String> string3 = v8::String::NewExternal(resource[3]);
- HEAP->CollectAllAvailableGarbage(); // Tenure string.
+ CcTest::heap()->CollectAllAvailableGarbage(); // Tenure string.
// Turn into a symbol.
i::Handle<i::String> string3_i = v8::Utils::OpenHandle(*string3);
- CHECK(!HEAP->InternalizeString(*string3_i)->IsFailure());
+ CHECK(!CcTest::heap()->InternalizeString(*string3_i)->IsFailure());
CHECK(string3_i->IsInternalizedString());
// We need to add usages for string* to avoid warnings in GCC 4.7
@@ -17327,7 +17356,7 @@ static void SpaghettiIncident(
// Test that an exception can be propagated down through a spaghetti
// stack using ReThrow.
THREADED_TEST(SpaghettiStackReThrow) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext context;
context->Global()->Set(
v8::String::New("s"),
@@ -17354,7 +17383,7 @@ THREADED_TEST(SpaghettiStackReThrow) {
TEST(Regress528) {
v8::V8::Initialize();
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<Context> other_context;
int gc_count;
@@ -17381,7 +17410,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_simple);
other_context->Exit();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -17403,7 +17432,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_eval);
other_context->Exit();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -17430,7 +17459,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_exception);
other_context->Exit();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -17475,6 +17504,70 @@ THREADED_TEST(FunctionGetInferredName) {
}
+THREADED_TEST(FunctionGetDisplayName) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ const char* code = "var error = false;"
+ "function a() { this.x = 1; };"
+ "a.displayName = 'display_a';"
+ "var b = (function() {"
+ " var f = function() { this.x = 2; };"
+ " f.displayName = 'display_b';"
+ " return f;"
+ "})();"
+ "var c = function() {};"
+ "c.__defineGetter__('displayName', function() {"
+ " error = true;"
+ " throw new Error();"
+ "});"
+ "function d() {};"
+ "d.__defineGetter__('displayName', function() {"
+ " error = true;"
+ " return 'wrong_display_name';"
+ "});"
+ "function e() {};"
+ "e.displayName = 'wrong_display_name';"
+ "e.__defineSetter__('displayName', function() {"
+ " error = true;"
+ " throw new Error();"
+ "});"
+ "function f() {};"
+ "f.displayName = { 'foo': 6, toString: function() {"
+ " error = true;"
+ " return 'wrong_display_name';"
+ "}};"
+ "var g = function() {"
+ " arguments.callee.displayName = 'set_in_runtime';"
+ "}; g();"
+ ;
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
+ v8::Script::Compile(v8::String::New(code), &origin)->Run();
+ v8::Local<v8::Value> error = env->Global()->Get(v8::String::New("error"));
+ v8::Local<v8::Function> a = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("a")));
+ v8::Local<v8::Function> b = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("b")));
+ v8::Local<v8::Function> c = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("c")));
+ v8::Local<v8::Function> d = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("d")));
+ v8::Local<v8::Function> e = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("e")));
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("g")));
+ CHECK_EQ(false, error->BooleanValue());
+ CHECK_EQ("display_a", *v8::String::Utf8Value(a->GetDisplayName()));
+ CHECK_EQ("display_b", *v8::String::Utf8Value(b->GetDisplayName()));
+ CHECK(c->GetDisplayName()->IsUndefined());
+ CHECK(d->GetDisplayName()->IsUndefined());
+ CHECK(e->GetDisplayName()->IsUndefined());
+ CHECK(f->GetDisplayName()->IsUndefined());
+ CHECK_EQ("set_in_runtime", *v8::String::Utf8Value(g->GetDisplayName()));
+}
+
+
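// Illustrative sketch (not part of this patch): the rule the test above pins
// down. Only a plain, string-valued "displayName" own property is reported
// by GetDisplayName(); accessors and non-string values yield undefined.
static void DisplayNameSketch() {
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());
  v8::Local<v8::Function> fn = v8::Local<v8::Function>::Cast(
      CompileRun("var h = function() {}; h.displayName = 'pretty'; h"));
  CHECK_EQ("pretty", *v8::String::Utf8Value(fn->GetDisplayName()));
}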
THREADED_TEST(ScriptLineNumber) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -17508,6 +17601,23 @@ THREADED_TEST(ScriptColumnNumber) {
}
+THREADED_TEST(FunctionIsBuiltin) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Function> f;
+ f = v8::Local<v8::Function>::Cast(CompileRun("Math.floor"));
+ CHECK(f->IsBuiltin());
+ f = v8::Local<v8::Function>::Cast(CompileRun("Object"));
+ CHECK(f->IsBuiltin());
+ f = v8::Local<v8::Function>::Cast(CompileRun("Object.__defineSetter__"));
+ CHECK(f->IsBuiltin());
+ f = v8::Local<v8::Function>::Cast(CompileRun("Array.prototype.toString"));
+ CHECK(f->IsBuiltin());
+ f = v8::Local<v8::Function>::Cast(CompileRun("function a() {}; a;"));
+ CHECK(!f->IsBuiltin());
+}
+
+
THREADED_TEST(FunctionGetScriptId) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -17566,7 +17676,7 @@ void FooSetInterceptor(Local<String> name,
TEST(SetterOnConstructorPrototype) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetAccessor(v8_str("x"),
GetterWhichReturns42,
@@ -17618,7 +17728,7 @@ static void NamedPropertySetterWhichSetsYOnThisTo23(
THREADED_TEST(InterceptorOnConstructorPrototype) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(NamedPropertyGetterWhichReturns42,
NamedPropertySetterWhichSetsYOnThisTo23);
@@ -17696,58 +17806,134 @@ TEST(Regress618) {
}
}
+v8::Isolate* gc_callbacks_isolate = NULL;
int prologue_call_count = 0;
int epilogue_call_count = 0;
int prologue_call_count_second = 0;
int epilogue_call_count_second = 0;
-void PrologueCallback(v8::GCType, v8::GCCallbackFlags) {
+void PrologueCallback(v8::GCType, v8::GCCallbackFlags flags) {
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
++prologue_call_count;
}
-void EpilogueCallback(v8::GCType, v8::GCCallbackFlags) {
+void PrologueCallback(v8::Isolate* isolate,
+ v8::GCType,
+ v8::GCCallbackFlags flags) {
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
+ CHECK_EQ(gc_callbacks_isolate, isolate);
+ ++prologue_call_count;
+}
+
+
+void EpilogueCallback(v8::GCType, v8::GCCallbackFlags flags) {
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
++epilogue_call_count;
}
-void PrologueCallbackSecond(v8::GCType, v8::GCCallbackFlags) {
+void EpilogueCallback(v8::Isolate* isolate,
+ v8::GCType,
+ v8::GCCallbackFlags flags) {
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
+ CHECK_EQ(gc_callbacks_isolate, isolate);
+ ++epilogue_call_count;
+}
+
+
+void PrologueCallbackSecond(v8::GCType, v8::GCCallbackFlags flags) {
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
+ ++prologue_call_count_second;
+}
+
+
+void PrologueCallbackSecond(v8::Isolate* isolate,
+ v8::GCType,
+ v8::GCCallbackFlags flags) {
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
+ CHECK_EQ(gc_callbacks_isolate, isolate);
++prologue_call_count_second;
}
-void EpilogueCallbackSecond(v8::GCType, v8::GCCallbackFlags) {
+void EpilogueCallbackSecond(v8::GCType, v8::GCCallbackFlags flags) {
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
++epilogue_call_count_second;
}
-TEST(GCCallbacks) {
+void EpilogueCallbackSecond(v8::Isolate* isolate,
+ v8::GCType,
+ v8::GCCallbackFlags flags) {
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
+ CHECK_EQ(gc_callbacks_isolate, isolate);
+ ++epilogue_call_count_second;
+}
+
+
+TEST(GCCallbacksOld) {
LocalContext context;
v8::V8::AddGCPrologueCallback(PrologueCallback);
v8::V8::AddGCEpilogueCallback(EpilogueCallback);
CHECK_EQ(0, prologue_call_count);
CHECK_EQ(0, epilogue_call_count);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(1, prologue_call_count);
CHECK_EQ(1, epilogue_call_count);
v8::V8::AddGCPrologueCallback(PrologueCallbackSecond);
v8::V8::AddGCEpilogueCallback(EpilogueCallbackSecond);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(1, prologue_call_count_second);
CHECK_EQ(1, epilogue_call_count_second);
v8::V8::RemoveGCPrologueCallback(PrologueCallback);
v8::V8::RemoveGCEpilogueCallback(EpilogueCallback);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
v8::V8::RemoveGCPrologueCallback(PrologueCallbackSecond);
v8::V8::RemoveGCEpilogueCallback(EpilogueCallbackSecond);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CHECK_EQ(2, prologue_call_count);
+ CHECK_EQ(2, epilogue_call_count);
+ CHECK_EQ(2, prologue_call_count_second);
+ CHECK_EQ(2, epilogue_call_count_second);
+}
+
+
+TEST(GCCallbacks) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ gc_callbacks_isolate = isolate;
+ isolate->AddGCPrologueCallback(PrologueCallback);
+ isolate->AddGCEpilogueCallback(EpilogueCallback);
+ CHECK_EQ(0, prologue_call_count);
+ CHECK_EQ(0, epilogue_call_count);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CHECK_EQ(1, prologue_call_count);
+ CHECK_EQ(1, epilogue_call_count);
+ isolate->AddGCPrologueCallback(PrologueCallbackSecond);
+ isolate->AddGCEpilogueCallback(EpilogueCallbackSecond);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CHECK_EQ(2, prologue_call_count);
+ CHECK_EQ(2, epilogue_call_count);
+ CHECK_EQ(1, prologue_call_count_second);
+ CHECK_EQ(1, epilogue_call_count_second);
+ isolate->RemoveGCPrologueCallback(PrologueCallback);
+ isolate->RemoveGCEpilogueCallback(EpilogueCallback);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CHECK_EQ(2, prologue_call_count);
+ CHECK_EQ(2, epilogue_call_count);
+ CHECK_EQ(2, prologue_call_count_second);
+ CHECK_EQ(2, epilogue_call_count_second);
+ isolate->RemoveGCPrologueCallback(PrologueCallbackSecond);
+ isolate->RemoveGCEpilogueCallback(EpilogueCallbackSecond);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
@@ -17758,7 +17944,7 @@ TEST(GCCallbacks) {
THREADED_TEST(AddToJSFunctionResultCache) {
i::FLAG_stress_compaction = false;
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext context;
@@ -17776,7 +17962,7 @@ THREADED_TEST(AddToJSFunctionResultCache) {
" return 'Different results for ' + key1 + ': ' + r1 + ' vs. ' + r1_;"
" return 'PASSED';"
"})()";
- HEAP->ClearJSFunctionResultCaches();
+ CcTest::heap()->ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
@@ -17799,7 +17985,7 @@ THREADED_TEST(FillJSFunctionResultCache) {
" return 'FAILED: k0CacheSize is too small';"
" return 'PASSED';"
"})()";
- HEAP->ClearJSFunctionResultCaches();
+ CcTest::heap()->ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
@@ -17823,7 +18009,7 @@ THREADED_TEST(RoundRobinGetFromCache) {
" };"
" return 'PASSED';"
"})()";
- HEAP->ClearJSFunctionResultCaches();
+ CcTest::heap()->ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
@@ -17847,7 +18033,7 @@ THREADED_TEST(ReverseGetFromCache) {
" };"
" return 'PASSED';"
"})()";
- HEAP->ClearJSFunctionResultCaches();
+ CcTest::heap()->ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
@@ -17864,7 +18050,7 @@ THREADED_TEST(TestEviction) {
" };"
" return 'PASSED';"
"})()";
- HEAP->ClearJSFunctionResultCaches();
+ CcTest::heap()->ClearJSFunctionResultCaches();
ExpectString(code, "PASSED");
}
@@ -17961,7 +18147,7 @@ THREADED_TEST(TwoByteStringInAsciiCons) {
TEST(ContainsOnlyOneByte) {
v8::V8::Initialize();
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
// Make a buffer long enough that it won't automatically be converted.
const int length = 512;
@@ -18030,7 +18216,7 @@ TEST(ContainsOnlyOneByte) {
void FailedAccessCheckCallbackGC(Local<v8::Object> target,
v8::AccessType type,
Local<v8::Value> data) {
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
@@ -18041,7 +18227,7 @@ TEST(GCInFailedAccessCheckCallback) {
v8::V8::Initialize();
v8::V8::SetFailedAccessCheckCallbackFunction(&FailedAccessCheckCallbackGC);
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
// Create an ObjectTemplate for global objects and install access
// check callbacks that will block access.
@@ -18112,116 +18298,28 @@ TEST(GCInFailedAccessCheckCallback) {
}
-TEST(DefaultIsolateGetCurrent) {
- CHECK(v8::Isolate::GetCurrent() != NULL);
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- CHECK(reinterpret_cast<i::Isolate*>(isolate)->IsDefaultIsolate());
- printf("*** %s\n", "DefaultIsolateGetCurrent success");
-}
-
-
TEST(IsolateNewDispose) {
- v8::Isolate* current_isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* current_isolate = CcTest::isolate();
v8::Isolate* isolate = v8::Isolate::New();
CHECK(isolate != NULL);
CHECK(!reinterpret_cast<i::Isolate*>(isolate)->IsDefaultIsolate());
CHECK(current_isolate != isolate);
- CHECK(current_isolate == v8::Isolate::GetCurrent());
-
- v8::V8::SetFatalErrorHandler(StoringErrorCallback);
- last_location = last_message = NULL;
- isolate->Dispose();
- CHECK_EQ(last_location, NULL);
- CHECK_EQ(last_message, NULL);
-}
-
-
-TEST(IsolateEnterExitDefault) {
- v8::Isolate* current_isolate = v8::Isolate::GetCurrent();
- CHECK(current_isolate != NULL); // Default isolate.
- v8::HandleScope scope(current_isolate);
- LocalContext context;
- ExpectString("'hello'", "hello");
- current_isolate->Enter();
- ExpectString("'still working'", "still working");
- current_isolate->Exit();
- ExpectString("'still working 2'", "still working 2");
- current_isolate->Exit();
- // Default isolate is always, well, 'default current'.
- CHECK_EQ(v8::Isolate::GetCurrent(), current_isolate);
- // Still working since default isolate is auto-entering any thread
- // that has no isolate and attempts to execute V8 APIs.
- ExpectString("'still working 3'", "still working 3");
-}
-
-
-TEST(DisposeDefaultIsolate) {
- v8::V8::SetFatalErrorHandler(StoringErrorCallback);
-
- // Run some V8 code to trigger default isolate to become 'current'.
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- LocalContext context;
- ExpectString("'run some V8'", "run some V8");
-
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- CHECK(reinterpret_cast<i::Isolate*>(isolate)->IsDefaultIsolate());
- last_location = last_message = NULL;
- isolate->Dispose();
- // It is not possible to dispose default isolate via Isolate API.
- CHECK_NE(last_location, NULL);
- CHECK_NE(last_message, NULL);
-}
-
-
-TEST(RunDefaultAndAnotherIsolate) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- LocalContext context;
-
- // Enter new isolate.
- v8::Isolate* isolate = v8::Isolate::New();
- CHECK(isolate);
- isolate->Enter();
- { // Need this block because subsequent Exit() will deallocate Heap,
- // so we need all scope objects to be deconstructed when it happens.
- v8::HandleScope scope_new(isolate);
- LocalContext context_new;
-
- // Run something in new isolate.
- CompileRun("var foo = 153;");
- ExpectTrue("function f() { return foo == 153; }; f()");
- }
- isolate->Exit();
-
- // This runs automatically in default isolate.
- // Variables in another isolate should be not available.
- ExpectTrue("function f() {"
- " try {"
- " foo;"
- " return false;"
- " } catch(e) {"
- " return true;"
- " }"
- "};"
- "var bar = 371;"
- "f()");
+ CHECK(current_isolate == CcTest::isolate());
v8::V8::SetFatalErrorHandler(StoringErrorCallback);
last_location = last_message = NULL;
isolate->Dispose();
CHECK_EQ(last_location, NULL);
CHECK_EQ(last_message, NULL);
-
- // Check that default isolate still runs.
- ExpectTrue("function f() { return bar == 371; }; f()");
}
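// Illustrative sketch (not part of this patch): the explicit lifecycle the
// isolate tests follow now that nothing is run on an implicit default
// isolate.
static void IsolateLifecycleSketch() {
  v8::Isolate* isolate = v8::Isolate::New();
  {
    v8::Isolate::Scope isolate_scope(isolate);  // Enter()/Exit() via RAII
    v8::HandleScope handle_scope(isolate);
    v8::Local<v8::Context> context = v8::Context::New(isolate);
    v8::Context::Scope context_scope(context);
    CompileRun("1 + 1");  // run something inside the fresh isolate
  }
  isolate->Dispose();  // legal only once the isolate is exited everywhere
}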
-TEST(DisposeIsolateWhenInUse) {
+UNINITIALIZED_TEST(DisposeIsolateWhenInUse) {
v8::Isolate* isolate = v8::Isolate::New();
CHECK(isolate);
isolate->Enter();
v8::HandleScope scope(isolate);
- LocalContext context;
+ LocalContext context(isolate);
// Run something in this isolate.
ExpectTrue("true");
v8::V8::SetFatalErrorHandler(StoringErrorCallback);
@@ -18284,16 +18382,16 @@ TEST(RunTwoIsolatesOnSingleThread) {
// Run some stuff in default isolate.
v8::Persistent<v8::Context> context_default;
{
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::Isolate::Scope iscope(isolate);
v8::HandleScope scope(isolate);
context_default.Reset(isolate, Context::New(isolate));
}
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context =
- v8::Local<v8::Context>::New(v8::Isolate::GetCurrent(), context_default);
+ v8::Local<v8::Context>::New(CcTest::isolate(), context_default);
v8::Context::Scope context_scope(context);
// Variables in other isolates should not be available; verify there
// is an exception.
@@ -18313,7 +18411,7 @@ TEST(RunTwoIsolatesOnSingleThread) {
{
v8::Isolate::Scope iscope(isolate2);
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(isolate2);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate2, context2);
v8::Context::Scope context_scope(context);
@@ -18349,9 +18447,9 @@ TEST(RunTwoIsolatesOnSingleThread) {
// Check that default isolate still runs.
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context =
- v8::Local<v8::Context>::New(v8::Isolate::GetCurrent(), context_default);
+ v8::Local<v8::Context>::New(CcTest::isolate(), context_default);
v8::Context::Scope context_scope(context);
ExpectTrue("function f() { return isDefaultIsolate; }; f()");
}
@@ -18361,7 +18459,7 @@ TEST(RunTwoIsolatesOnSingleThread) {
static int CalcFibonacci(v8::Isolate* isolate, int limit) {
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope scope(isolate);
- LocalContext context;
+ LocalContext context(isolate);
i::ScopedVector<char> code(1024);
i::OS::SNPrintF(code, "function fib(n) {"
" if (n <= 2) return 1;"
@@ -18405,8 +18503,8 @@ TEST(MultipleIsolatesOnIndividualThreads) {
thread1.Start();
thread2.Start();
- int result1 = CalcFibonacci(v8::Isolate::GetCurrent(), 21);
- int result2 = CalcFibonacci(v8::Isolate::GetCurrent(), 12);
+ int result1 = CalcFibonacci(CcTest::isolate(), 21);
+ int result2 = CalcFibonacci(CcTest::isolate(), 12);
thread1.Join();
thread2.Join();
@@ -18463,6 +18561,8 @@ class InitDefaultIsolateThread : public v8::internal::Thread {
result_(false) { }
void Run() {
+ v8::Isolate* isolate = v8::Isolate::New();
+ isolate->Enter();
switch (testCase_) {
case IgnoreOOM:
v8::V8::IgnoreOutOfMemoryException();
@@ -18493,6 +18593,8 @@ class InitDefaultIsolateThread : public v8::internal::Thread {
v8::V8::SetAddHistogramSampleFunction(NULL);
break;
}
+ isolate->Exit();
+ isolate->Dispose();
result_ = true;
}
@@ -18644,7 +18746,7 @@ TEST(DontDeleteCellLoadIC) {
"})()",
"ReferenceError: cell is not defined");
CompileRun("cell = \"new_second\";");
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
ExpectString("readCell()", "new_second");
ExpectString("readCell()", "new_second");
}
@@ -18716,7 +18818,7 @@ class Visitor42 : public v8::PersistentHandleVisitor {
uint16_t class_id) {
if (class_id != 42) return;
CHECK_EQ(42, value->WrapperClassId());
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::Value> handle = v8::Local<v8::Value>::New(isolate, *value);
v8::Handle<v8::Value> object =
@@ -18769,7 +18871,7 @@ TEST(PersistentHandleInNewSpaceVisitor) {
object1.SetWrapperClassId(42);
CHECK_EQ(42, object1.WrapperClassId());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
v8::Persistent<v8::Object> object2(isolate, v8::Object::New());
CHECK_EQ(0, object2.WrapperClassId());
@@ -18931,12 +19033,12 @@ static void CheckContextId(v8::Handle<Object> object, int expected) {
THREADED_TEST(CreationContext) {
- HandleScope handle_scope(v8::Isolate::GetCurrent());
- Handle<Context> context1 = Context::New(v8::Isolate::GetCurrent());
+ HandleScope handle_scope(CcTest::isolate());
+ Handle<Context> context1 = Context::New(CcTest::isolate());
InstallContextId(context1, 1);
- Handle<Context> context2 = Context::New(v8::Isolate::GetCurrent());
+ Handle<Context> context2 = Context::New(CcTest::isolate());
InstallContextId(context2, 2);
- Handle<Context> context3 = Context::New(v8::Isolate::GetCurrent());
+ Handle<Context> context3 = Context::New(CcTest::isolate());
InstallContextId(context3, 3);
Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New();
@@ -19014,8 +19116,8 @@ THREADED_TEST(CreationContext) {
THREADED_TEST(CreationContextOfJsFunction) {
- HandleScope handle_scope(v8::Isolate::GetCurrent());
- Handle<Context> context = Context::New(v8::Isolate::GetCurrent());
+ HandleScope handle_scope(CcTest::isolate());
+ Handle<Context> context = Context::New(CcTest::isolate());
InstallContextId(context, 1);
Local<Object> function;
@@ -19144,7 +19246,7 @@ TEST(HasOwnProperty) {
TEST(IndexedInterceptorWithStringProto) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(NULL,
NULL,
@@ -19270,7 +19372,7 @@ THREADED_TEST(CallAPIFunctionOnNonObject) {
// Regression test for issue 1470.
THREADED_TEST(ReadOnlyIndexedProperties) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Local<ObjectTemplate> templ = ObjectTemplate::New();
LocalContext context;
@@ -19300,15 +19402,15 @@ THREADED_TEST(Regress1516) {
int elements;
{ i::MapCache* map_cache =
- i::MapCache::cast(i::Isolate::Current()->context()->map_cache());
+ i::MapCache::cast(CcTest::i_isolate()->context()->map_cache());
elements = map_cache->NumberOfElements();
CHECK_LE(1, elements);
}
- i::Isolate::Current()->heap()->CollectAllGarbage(
+ CcTest::heap()->CollectAllGarbage(
i::Heap::kAbortIncrementalMarkingMask);
- { i::Object* raw_map_cache = i::Isolate::Current()->context()->map_cache();
- if (raw_map_cache != i::Isolate::Current()->heap()->undefined_value()) {
+ { i::Object* raw_map_cache = CcTest::i_isolate()->context()->map_cache();
+ if (raw_map_cache != CcTest::heap()->undefined_value()) {
i::MapCache* map_cache = i::MapCache::cast(raw_map_cache);
CHECK_GT(elements, map_cache->NumberOfElements());
}
@@ -19335,7 +19437,7 @@ static bool BlockProtoNamedSecurityTestCallback(Local<v8::Object> global,
THREADED_TEST(Regress93759) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
// Template for object with security check.
@@ -19407,25 +19509,25 @@ THREADED_TEST(Regress93759) {
CHECK(result1->Equals(simple_object->GetPrototype()));
Local<Value> result2 = CompileRun("Object.getPrototypeOf(protected)");
- CHECK(result2->Equals(Undefined()));
+ CHECK(result2->Equals(Undefined(isolate)));
Local<Value> result3 = CompileRun("Object.getPrototypeOf(global)");
CHECK(result3->Equals(global_object->GetPrototype()));
Local<Value> result4 = CompileRun("Object.getPrototypeOf(proxy)");
- CHECK(result4->Equals(Undefined()));
+ CHECK(result4->Equals(Undefined(isolate)));
Local<Value> result5 = CompileRun("Object.getPrototypeOf(hidden)");
CHECK(result5->Equals(
object_with_hidden->GetPrototype()->ToObject()->GetPrototype()));
Local<Value> result6 = CompileRun("Object.getPrototypeOf(phidden)");
- CHECK(result6->Equals(Undefined()));
+ CHECK(result6->Equals(Undefined(isolate)));
}
THREADED_TEST(Regress125988) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> intercept = FunctionTemplate::New();
AddInterceptor(intercept, EmptyInterceptorGetter, EmptyInterceptorSetter);
LocalContext env;
@@ -19459,7 +19561,7 @@ static void TestReceiver(Local<Value> expected_result,
THREADED_TEST(ForeignFunctionReceiver) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
// Create two contexts with different "id" properties ('i' and 'o').
@@ -19618,13 +19720,13 @@ TEST(CallCompletedCallback) {
void CallCompletedCallbackNoException() {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
CompileRun("1+1;");
}
void CallCompletedCallbackException() {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
CompileRun("throw 'second exception';");
}
@@ -19715,26 +19817,24 @@ TEST(PrimaryStubCache) {
TEST(StaticGetters) {
LocalContext context;
- i::Factory* factory = i::Isolate::Current()->factory();
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ i::Factory* factory = CcTest::i_isolate()->factory();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
i::Handle<i::Object> undefined_value = factory->undefined_value();
- CHECK(*v8::Utils::OpenHandle(*v8::Undefined()) == *undefined_value);
CHECK(*v8::Utils::OpenHandle(*v8::Undefined(isolate)) == *undefined_value);
i::Handle<i::Object> null_value = factory->null_value();
- CHECK(*v8::Utils::OpenHandle(*v8::Null()) == *null_value);
CHECK(*v8::Utils::OpenHandle(*v8::Null(isolate)) == *null_value);
i::Handle<i::Object> true_value = factory->true_value();
- CHECK(*v8::Utils::OpenHandle(*v8::True()) == *true_value);
CHECK(*v8::Utils::OpenHandle(*v8::True(isolate)) == *true_value);
i::Handle<i::Object> false_value = factory->false_value();
- CHECK(*v8::Utils::OpenHandle(*v8::False()) == *false_value);
CHECK(*v8::Utils::OpenHandle(*v8::False(isolate)) == *false_value);
}
-TEST(IsolateEmbedderData) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+UNINITIALIZED_TEST(IsolateEmbedderData) {
+ CcTest::DisableAutomaticDispose();
+ v8::Isolate* isolate = v8::Isolate::New();
+ isolate->Enter();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
CHECK_EQ(NULL, isolate->GetData());
CHECK_EQ(NULL, i_isolate->GetData());
@@ -19746,16 +19846,15 @@ TEST(IsolateEmbedderData) {
i_isolate->SetData(data2);
CHECK_EQ(data2, isolate->GetData());
CHECK_EQ(data2, i_isolate->GetData());
- i_isolate->TearDown();
- CHECK_EQ(data2, isolate->GetData());
- CHECK_EQ(data2, i_isolate->GetData());
+ isolate->Exit();
+ isolate->Dispose();
}
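// Illustrative sketch (not part of this patch): the single embedder slot the
// test pins down. At this revision Isolate exposes one untyped pointer slot
// (SetData/GetData) and the embedder retains ownership of what it stores;
// EmbedderState is a hypothetical embedder-side struct.
struct EmbedderState { int request_count; };

static void AttachState(v8::Isolate* isolate, EmbedderState* state) {
  isolate->SetData(state);
  EmbedderState* round_trip = static_cast<EmbedderState*>(isolate->GetData());
  CHECK_EQ(state, round_trip);
}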
TEST(StringEmpty) {
LocalContext context;
- i::Factory* factory = i::Isolate::Current()->factory();
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ i::Factory* factory = CcTest::i_isolate()->factory();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
i::Handle<i::Object> empty_string = factory->empty_string();
CHECK(*v8::Utils::OpenHandle(*v8::String::Empty()) == *empty_string);
@@ -20029,7 +20128,7 @@ static void Helper137002(bool do_store,
THREADED_TEST(Regress137002a) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_compilation_cache = false;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
for (int i = 0; i < 16; i++) {
Helper137002(i & 8, i & 4, i & 2, i & 1);
}
@@ -20170,10 +20269,11 @@ THREADED_TEST(Regress2535) {
THREADED_TEST(Regress2746) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
Local<Object> obj = Object::New();
Local<String> key = String::New("key");
- obj->SetHiddenValue(key, v8::Undefined());
+ obj->SetHiddenValue(key, v8::Undefined(isolate));
Local<Value> value = obj->GetHiddenValue(key);
CHECK(!value.IsEmpty());
CHECK(value->IsUndefined());
@@ -20297,7 +20397,7 @@ void UnreachableCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(JSONStringifyAccessCheck) {
v8::V8::Initialize();
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
// Create an ObjectTemplate for global objects and install access
// check callbacks that will block access.
@@ -20353,7 +20453,8 @@ void FailedAccessCheckThrows(Local<v8::Object> target,
Local<v8::Value> data) {
access_check_fail_thrown = true;
i::PrintF("Access check failed. Error thrown.\n");
- v8::ThrowException(v8::Exception::Error(v8_str("cross context")));
+ CcTest::isolate()->ThrowException(
+ v8::Exception::Error(v8_str("cross context")));
}
@@ -20394,7 +20495,7 @@ TEST(AccessCheckThrows) {
i::FLAG_allow_natives_syntax = true;
v8::V8::Initialize();
v8::V8::SetFailedAccessCheckCallbackFunction(&FailedAccessCheckThrows);
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
// Create an ObjectTemplate for global objects and install access
// check callbacks that will block access.
@@ -20470,7 +20571,7 @@ THREADED_TEST(Regress256330) {
THREADED_TEST(CrankshaftInterceptorSetter) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
@@ -20496,7 +20597,7 @@ THREADED_TEST(CrankshaftInterceptorSetter) {
THREADED_TEST(CrankshaftInterceptorGetter) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
@@ -20519,7 +20620,7 @@ THREADED_TEST(CrankshaftInterceptorGetter) {
THREADED_TEST(CrankshaftInterceptorFieldRead) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
@@ -20539,7 +20640,7 @@ THREADED_TEST(CrankshaftInterceptorFieldRead) {
THREADED_TEST(CrankshaftInterceptorFieldWrite) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
Handle<FunctionTemplate> templ = FunctionTemplate::New();
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
@@ -20584,6 +20685,36 @@ THREADED_TEST(FunctionNew) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Object* elm = i_isolate->native_context()->function_cache()
->GetElementNoExceptionThrown(i_isolate, serial_number);
- CHECK(elm->IsNull());
+ CHECK(elm->IsUndefined());
+ // Verify that each Function::New call creates a new function instance.
+ Local<Object> data2 = v8::Object::New();
+ function_new_expected_env = data2;
+ Local<Function> func2 = Function::New(isolate, FunctionNewCallback, data2);
+ CHECK(!func2->IsNull());
+ CHECK_NE(func, func2);
+ env->Global()->Set(v8_str("func2"), func2);
+ Local<Value> result2 = CompileRun("func2();");
+ CHECK_EQ(v8::Integer::New(17, isolate), result2);
}
+
+TEST(EscapeableHandleScope) {
+ HandleScope outer_scope(CcTest::isolate());
+ LocalContext context;
+ const int runs = 10;
+ Local<String> values[runs];
+ for (int i = 0; i < runs; i++) {
+ v8::EscapableHandleScope inner_scope(CcTest::isolate());
+ Local<String> value;
+ if (i != 0) value = v8_str("escape value");
+ values[i] = inner_scope.Escape(value);
+ }
+ for (int i = 0; i < runs; i++) {
+ Local<String> expected;
+ if (i != 0) {
+ CHECK_EQ(v8_str("escape value"), values[i]);
+ } else {
+ CHECK(values[i].IsEmpty());
+ }
+ }
+}
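// Illustrative sketch (not part of this patch): the pattern the new test
// covers. Escape() promotes exactly one handle into the enclosing scope; an
// empty handle escapes as empty, which is what the i == 0 branch checks.
static v8::Local<v8::String> MakeGreeting(v8::Isolate* isolate) {
  v8::EscapableHandleScope inner(isolate);
  v8::Local<v8::String> s = v8_str("hello");
  return inner.Escape(s);  // the one handle that outlives the inner scope
}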
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 1a4c1ae369..69ea6f4742 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -47,7 +47,7 @@ typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
TEST(0) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -74,7 +74,7 @@ TEST(0) {
TEST(1) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -112,7 +112,7 @@ TEST(1) {
TEST(2) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -159,7 +159,7 @@ TEST(2) {
TEST(3) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -215,7 +215,7 @@ TEST(3) {
TEST(4) {
// Test the VFP floating point instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -363,7 +363,7 @@ TEST(4) {
TEST(5) {
// Test the ARMv7 bitfield instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -400,7 +400,7 @@ TEST(5) {
TEST(6) {
// Test saturating instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -443,8 +443,7 @@ static void TestRoundingMode(VCVTTypes types,
double value,
int expected,
bool expected_exception = false) {
- CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -511,6 +510,7 @@ static void TestRoundingMode(VCVTTypes types,
TEST(7) {
+ CcTest::InitializeVM();
// Test vfp rounding modes.
// s32_f64 (double to integer).
@@ -623,7 +623,7 @@ TEST(7) {
TEST(8) {
// Test VFP multi load/store with ia_w.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -731,7 +731,7 @@ TEST(8) {
TEST(9) {
// Test VFP multi load/store with ia.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -843,7 +843,7 @@ TEST(9) {
TEST(10) {
// Test VFP multi load/store with db_w.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -951,7 +951,7 @@ TEST(10) {
TEST(11) {
// Test instructions using the carry flag.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -1017,7 +1017,7 @@ TEST(11) {
TEST(12) {
// Test chaining of label usages within instructions (issue 1644).
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -1032,7 +1032,7 @@ TEST(12) {
TEST(13) {
// Test VFP instructions using registers d16-d31.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
if (!CpuFeatures::IsSupported(VFP32DREGS)) {
@@ -1160,7 +1160,7 @@ TEST(13) {
TEST(14) {
// Test the VFP Canonicalized Nan mode.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -1240,7 +1240,7 @@ TEST(14) {
TEST(15) {
// Test the Neon instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -1347,7 +1347,7 @@ TEST(15) {
TEST(16) {
// Test the pkh, uxtb, uxtab and uxtb16 instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -1423,7 +1423,7 @@ TEST(17) {
// Test generating labels at high addresses.
// Should not assert.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
// Generate a code segment that will be longer than 2^24 bytes.
@@ -1443,7 +1443,7 @@ TEST(code_relative_offset) {
// Test extracting the offset of a label from the beginning of the code
// in a register.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
// Initialize a code object that will contain the code.
Handle<Object> code_object(isolate->heap()->undefined_value(), isolate);
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 76eecc02e7..d40156841e 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -264,15 +264,15 @@ TEST(AssemblerIa326) {
Assembler assm(isolate, buffer, sizeof buffer);
CpuFeatureScope fscope(&assm, SSE2);
- __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
- __ movdbl(xmm1, Operand(esp, 3 * kPointerSize));
+ __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
+ __ movsd(xmm1, Operand(esp, 3 * kPointerSize));
__ addsd(xmm0, xmm1);
__ mulsd(xmm0, xmm1);
__ subsd(xmm0, xmm1);
__ divsd(xmm0, xmm1);
// Copy xmm0 to st(0) using eight bytes of stack.
__ sub(esp, Immediate(8));
- __ movdbl(Operand(esp, 0), xmm0);
+ __ movsd(Operand(esp, 0), xmm0);
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(8));
__ ret(0);
@@ -313,7 +313,7 @@ TEST(AssemblerIa328) {
__ cvtsi2sd(xmm0, eax);
// Copy xmm0 to st(0) using eight bytes of stack.
__ sub(esp, Immediate(8));
- __ movdbl(Operand(esp, 0), xmm0);
+ __ movsd(Operand(esp, 0), xmm0);
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(8));
__ ret(0);
@@ -532,7 +532,7 @@ TEST(StackAlignmentForSSE2) {
CHECK_EQ(0, OS::ActivationFrameAlignment() % 16);
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->Set(v8_str("do_sse2"), v8::FunctionTemplate::New(DoSSE2));
@@ -564,4 +564,39 @@ TEST(StackAlignmentForSSE2) {
#endif // __GNUC__
+TEST(AssemblerIa32Extractps) {
+ CcTest::InitializeVM();
+ if (!CpuFeatures::IsSupported(SSE2) ||
+ !CpuFeatures::IsSupported(SSE4_1)) return;
+
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[256];
+ MacroAssembler assm(isolate, buffer, sizeof buffer);
+ { CpuFeatureScope fscope2(&assm, SSE2);
+ CpuFeatureScope fscope41(&assm, SSE4_1);
+ __ movsd(xmm1, Operand(esp, 4));
+ __ extractps(eax, xmm1, 0x1);
+ __ ret(0);
+ }
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Code* code = Code::cast(isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked());
+ CHECK(code->IsCode());
+#ifdef OBJECT_PRINT
+ Code::cast(code)->Print();
+#endif
+
+ F4 f = FUNCTION_CAST<F4>(Code::cast(code)->entry());
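+  // V8_2PART_UINT64_C(hi, lo) assembles the 64-bit constant (hi << 32) | 0xlo.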
+ uint64_t value1 = V8_2PART_UINT64_C(0x12345678, 87654321);
+ CHECK_EQ(0x12345678, f(uint64_to_double(value1)));
+ uint64_t value2 = V8_2PART_UINT64_C(0x87654321, 12345678);
+ CHECK_EQ(0x87654321, f(uint64_to_double(value2)));
+}
+
+
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 54ec43f18b..e8e724c052 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -49,7 +49,7 @@ typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
TEST(MIPS0) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assm(isolate, NULL, 0);
@@ -61,7 +61,7 @@ TEST(MIPS0) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -75,7 +75,7 @@ TEST(MIPS0) {
TEST(MIPS1) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assm(isolate, NULL, 0);
@@ -100,7 +100,7 @@ TEST(MIPS1) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -114,7 +114,7 @@ TEST(MIPS1) {
TEST(MIPS2) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assm(isolate, NULL, 0);
@@ -241,7 +241,7 @@ TEST(MIPS2) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -256,7 +256,7 @@ TEST(MIPS2) {
TEST(MIPS3) {
// Test floating point instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -314,7 +314,7 @@ TEST(MIPS3) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -346,7 +346,7 @@ TEST(MIPS3) {
TEST(MIPS4) {
// Test moves between floating point and integer registers.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -382,7 +382,7 @@ TEST(MIPS4) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -403,7 +403,7 @@ TEST(MIPS4) {
TEST(MIPS5) {
// Test conversions between doubles and integers.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -448,7 +448,7 @@ TEST(MIPS5) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -471,7 +471,7 @@ TEST(MIPS5) {
TEST(MIPS6) {
// Test simple memory loads and stores.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -521,7 +521,7 @@ TEST(MIPS6) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -544,7 +544,7 @@ TEST(MIPS6) {
TEST(MIPS7) {
// Test floating point compare and branch instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -598,7 +598,7 @@ TEST(MIPS7) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -622,7 +622,7 @@ TEST(MIPS7) {
TEST(MIPS8) {
// Test ROTR and ROTRV instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -697,7 +697,7 @@ TEST(MIPS8) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -727,7 +727,7 @@ TEST(MIPS8) {
TEST(MIPS9) {
// Test BRANCH improvements.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assm(isolate, NULL, 0);
@@ -745,7 +745,7 @@ TEST(MIPS9) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -757,7 +757,7 @@ TEST(MIPS10) {
// Test conversions between doubles and long integers.
  // Test how the long ints map to FP register pairs.

CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -805,7 +805,7 @@ TEST(MIPS10) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -830,7 +830,7 @@ TEST(MIPS10) {
TEST(MIPS11) {
// Test LWL, LWR, SWL and SWR instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -938,7 +938,7 @@ TEST(MIPS11) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -974,7 +974,7 @@ TEST(MIPS11) {
TEST(MIPS12) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -1043,7 +1043,7 @@ TEST(MIPS12) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -1066,7 +1066,7 @@ TEST(MIPS12) {
TEST(MIPS13) {
// Test Cvt_d_uw and Trunc_uw_d macros.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -1100,7 +1100,7 @@ TEST(MIPS13) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -1125,7 +1125,7 @@ TEST(MIPS13) {
TEST(MIPS14) {
// Test round, floor, ceil, trunc, cvt.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
#define ROUND_STRUCT_ELEMENT(x) \
@@ -1221,7 +1221,7 @@ TEST(MIPS14) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
@@ -1258,7 +1258,7 @@ TEST(MIPS14) {
TEST(MIPS15) {
// Test chaining of label usages within instructions (issue 1644).
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index f7d2311192..cd1ed2823b 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -35,34 +35,7 @@
#include "serialize.h"
#include "cctest.h"
-using v8::internal::Assembler;
-using v8::internal::Code;
-using v8::internal::CodeDesc;
-using v8::internal::FUNCTION_CAST;
-using v8::internal::Immediate;
-using v8::internal::Isolate;
-using v8::internal::Label;
-using v8::internal::OS;
-using v8::internal::Operand;
-using v8::internal::byte;
-using v8::internal::greater;
-using v8::internal::less_equal;
-using v8::internal::equal;
-using v8::internal::not_equal;
-using v8::internal::r13;
-using v8::internal::r15;
-using v8::internal::r8;
-using v8::internal::r9;
-using v8::internal::rax;
-using v8::internal::rbx;
-using v8::internal::rbp;
-using v8::internal::rcx;
-using v8::internal::rdi;
-using v8::internal::rdx;
-using v8::internal::rsi;
-using v8::internal::rsp;
-using v8::internal::times_1;
-using v8::internal::xmm0;
+using namespace v8::internal;
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
@@ -77,13 +50,16 @@ using v8::internal::xmm0;
typedef int (*F0)();
typedef int (*F1)(int64_t x);
typedef int (*F2)(int64_t x, int64_t y);
+typedef int (*F3)(double x);
+typedef int64_t (*F4)(int64_t* x, int64_t* y);
+typedef int64_t (*F5)(int64_t x);
#ifdef _WIN64
-static const v8::internal::Register arg1 = rcx;
-static const v8::internal::Register arg2 = rdx;
+static const Register arg1 = rcx;
+static const Register arg2 = rdx;
#else
-static const v8::internal::Register arg1 = rdi;
-static const v8::internal::Register arg2 = rsi;
+static const Register arg1 = rdi;
+static const Register arg2 = rsi;
#endif
#define __ assm.
@@ -96,7 +72,7 @@ TEST(AssemblerX64ReturnOperation) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 2 and returns it.
__ movq(rax, arg2);
@@ -118,7 +94,7 @@ TEST(AssemblerX64StackOperations) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 2 and returns it.
// We compile without stack frame pointers, so the gdb debugger shows
@@ -150,7 +126,7 @@ TEST(AssemblerX64ArithmeticOperations) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that adds arguments returning the sum.
__ movq(rax, arg2);
@@ -172,7 +148,7 @@ TEST(AssemblerX64ImulOperation) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that multiplies arguments returning the high
// word.
@@ -193,6 +169,157 @@ TEST(AssemblerX64ImulOperation) {
}
+TEST(AssemblerX64XchglOperations) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
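+  // xchgl swaps only the low 32 bits; the 32-bit register writes
+  // zero-extend, clearing the upper words of both rax and rbx.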
+ __ movq(rax, Operand(arg1, 0));
+ __ movq(rbx, Operand(arg2, 0));
+ __ xchgl(rax, rbx);
+ __ movq(Operand(arg1, 0), rax);
+ __ movq(Operand(arg2, 0), rbx);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
+ int64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
+ int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+ CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 40000000), left);
+ CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 20000000), right);
+ USE(result);
+}
+
+
+TEST(AssemblerX64OrlOperations) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
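+  // The 32-bit OR writes only the low dword of *arg1; the upper dword
+  // is left intact.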
+ __ movq(rax, Operand(arg2, 0));
+ __ orl(Operand(arg1, 0), rax);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
+ int64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
+ int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+ CHECK_EQ(V8_2PART_UINT64_C(0x10000000, 60000000), left);
+ USE(result);
+}
+
+
+TEST(AssemblerX64RollOperations) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
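+  // roll is a 32-bit rotate: 0xC0000000 rotated left by 1 gives
+  // 0x80000001, and the 32-bit write zero-extends, clearing the upper word.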
+ __ movq(rax, arg1);
+ __ roll(rax, Immediate(1));
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int64_t src = V8_2PART_UINT64_C(0x10000000, C0000000);
+ int64_t result = FUNCTION_CAST<F5>(buffer)(src);
+ CHECK_EQ(V8_2PART_UINT64_C(0x00000000, 80000001), result);
+}
+
+
+TEST(AssemblerX64SublOperations) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
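+  // The 32-bit subtract touches only the low dword of *arg1:
+  // 0x20000000 - 0x40000000 wraps to 0xE0000000.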
+ __ movq(rax, Operand(arg2, 0));
+ __ subl(Operand(arg1, 0), rax);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
+ int64_t right = V8_2PART_UINT64_C(0x30000000, 40000000);
+ int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+ CHECK_EQ(V8_2PART_UINT64_C(0x10000000, e0000000), left);
+ USE(result);
+}
+
+
+TEST(AssemblerX64TestlOperations) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
+ // Set rax with the ZF flag of the testl instruction.
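+  // testl sets ZF when (dst & src) == 0, so for the disjoint bit
+  // patterns below rax keeps the value 1.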
+ Label done;
+ __ movq(rax, Immediate(1));
+ __ movq(rbx, Operand(arg2, 0));
+ __ testl(Operand(arg1, 0), rbx);
+ __ j(zero, &done, Label::kNear);
+ __ movq(rax, Immediate(0));
+ __ bind(&done);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
+ int64_t right = V8_2PART_UINT64_C(0x30000000, 00000000);
+ int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+ CHECK_EQ(static_cast<int64_t>(1), result);
+}
+
+
+TEST(AssemblerX64XorlOperations) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
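+  // Only the low dword of *arg1 is XORed: 0x20000000 ^ 0x60000000 == 0x40000000.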
+ __ movq(rax, Operand(arg2, 0));
+ __ xorl(Operand(arg1, 0), rax);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int64_t left = V8_2PART_UINT64_C(0x10000000, 20000000);
+ int64_t right = V8_2PART_UINT64_C(0x30000000, 60000000);
+ int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
+ CHECK_EQ(V8_2PART_UINT64_C(0x10000000, 40000000), left);
+ USE(result);
+}
+
+
TEST(AssemblerX64MemoryOperands) {
// Allocate an executable page of memory.
size_t actual_size;
@@ -200,7 +327,7 @@ TEST(AssemblerX64MemoryOperands) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 2 and returns it.
__ push(rbp);
@@ -234,7 +361,7 @@ TEST(AssemblerX64ControlFlow) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 1 and returns it.
__ push(rbp);
@@ -263,7 +390,7 @@ TEST(AssemblerX64LoopImmediates) {
&actual_size,
true));
CHECK(buffer);
- Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble two loops using rax as counter, and verify the ending counts.
Label Fail;
__ movq(rax, Immediate(-3));
@@ -353,7 +480,7 @@ TEST(AssemblerX64LabelChaining) {
// Test chaining of label usages within instructions (issue 1644).
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- Assembler assm(Isolate::Current(), NULL, 0);
+ Assembler assm(CcTest::i_isolate(), NULL, 0);
Label target;
__ j(equal, &target);
@@ -366,8 +493,8 @@ TEST(AssemblerX64LabelChaining) {
TEST(AssemblerMultiByteNop) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::internal::byte buffer[1024];
- Isolate* isolate = Isolate::Current();
+ byte buffer[1024];
+ Isolate* isolate = CcTest::i_isolate();
Assembler assm(isolate, buffer, sizeof(buffer));
__ push(rbx);
__ push(rcx);
@@ -420,7 +547,7 @@ TEST(AssemblerMultiByteNop) {
Code* code = Code::cast(isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- v8::internal::Handle<Code>())->ToObjectChecked());
+ Handle<Code>())->ToObjectChecked());
CHECK(code->IsCode());
F0 f = FUNCTION_CAST<F0>(code->entry());
@@ -433,15 +560,14 @@ TEST(AssemblerMultiByteNop) {
#define ELEMENT_COUNT 4
void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::internal::byte buffer[1024];
+ byte buffer[1024];
CHECK(args[0]->IsArray());
v8::Local<v8::Array> vec = v8::Local<v8::Array>::Cast(args[0]);
CHECK_EQ(ELEMENT_COUNT, vec->Length());
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Assembler assm(isolate, buffer, sizeof(buffer));
  // Remove the return address from the stack to fix the stack frame alignment.
@@ -473,7 +599,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
Code* code = Code::cast(isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- v8::internal::Handle<Code>())->ToObjectChecked());
+ Handle<Code>())->ToObjectChecked());
CHECK(code->IsCode());
F0 f = FUNCTION_CAST<F0>(code->entry());
@@ -483,9 +609,10 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(StackAlignmentForSSE2) {
+ CcTest::InitializeVM();
CHECK_EQ(0, OS::ActivationFrameAlignment() % 16);
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->Set(v8_str("do_sse2"), v8::FunctionTemplate::New(DoSSE2));
@@ -517,4 +644,36 @@ TEST(StackAlignmentForSSE2) {
#endif // __GNUC__
+TEST(AssemblerX64Extractps) {
+ CcTest::InitializeVM();
+ if (!CpuFeatures::IsSupported(SSE4_1)) return;
+
+ v8::HandleScope scope(CcTest::isolate());
+ byte buffer[256];
+ Isolate* isolate = CcTest::i_isolate();
+ Assembler assm(isolate, buffer, sizeof(buffer));
+ { CpuFeatureScope fscope2(&assm, SSE4_1);
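+    // xmm0 carries the incoming double argument; extractps lane 1 is
+    // the upper 32 bits of its bit pattern.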
+ __ extractps(rax, xmm0, 0x1);
+ __ ret(0);
+ }
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Code* code = Code::cast(isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked());
+ CHECK(code->IsCode());
+#ifdef OBJECT_PRINT
+ Code::cast(code)->Print();
+#endif
+
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ uint64_t value1 = V8_2PART_UINT64_C(0x12345678, 87654321);
+ CHECK_EQ(0x12345678, f(uint64_to_double(value1)));
+ uint64_t value2 = V8_2PART_UINT64_C(0x87654321, 12345678);
+ CHECK_EQ(0x87654321, f(uint64_to_double(value2)));
+}
+
+
#undef __
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index 9f20110801..299f2a8960 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -39,10 +39,10 @@ TEST(List) {
List<AstNode*>* list = new List<AstNode*>(0);
CHECK_EQ(0, list->length());
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Zone zone(isolate);
AstNodeFactory<AstNullVisitor> factory(isolate, &zone);
- AstNode* node = factory.NewEmptyStatement();
+ AstNode* node = factory.NewEmptyStatement(RelocInfo::kNoPosition);
list->Add(node);
CHECK_EQ(1, list->length());
CHECK_EQ(node, list->at(0));
diff --git a/deps/v8/test/cctest/test-code-stubs-arm.cc b/deps/v8/test/cctest/test-code-stubs-arm.cc
index c99433edad..54eaa58318 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm.cc
@@ -143,7 +143,7 @@ static Isolate* GetIsolateFrom(LocalContext* context) {
int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
double from) {
#ifdef USE_SIMULATOR
- return reinterpret_cast<int32_t>(CALL_GENERATED_CODE(func, from, 0, 0, 0, 0));
+ return CALL_GENERATED_FP_INT(func, from, 0);
#else
return (*func)(from);
#endif
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 7e87e10991..9fd68e5222 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -79,7 +79,7 @@ v8::DeclareExtension kPrintExtensionDeclaration(&kPrintExtension);
static MaybeObject* GetGlobalProperty(const char* name) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Handle<String> internalized_name =
isolate->factory()->InternalizeUtf8String(name);
return isolate->context()->global_object()->GetProperty(*internalized_name);
@@ -87,7 +87,7 @@ static MaybeObject* GetGlobalProperty(const char* name) {
static void SetGlobalProperty(const char* name, Object* value) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Handle<Object> object(value, isolate);
Handle<String> internalized_name =
isolate->factory()->InternalizeUtf8String(name);
@@ -97,7 +97,7 @@ static void SetGlobalProperty(const char* name, Object* value) {
static Handle<JSFunction> Compile(const char* source) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Handle<String> source_code(
isolate->factory()->NewStringFromUtf8(CStrVector(source)));
Handle<SharedFunctionInfo> shared_function =
@@ -202,8 +202,9 @@ TEST(Sum) {
TEST(Print) {
- CcTest::InitializeVM(PRINT_EXTENSION);
v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::NewContext(PRINT_EXTENSION);
+ v8::Context::Scope context_scope(context);
const char* source = "for (n = 0; n < 100; ++n) print(n, 1, 2);";
Handle<JSFunction> fun = Compile(source);
if (fun.is_null()) return;
@@ -273,8 +274,10 @@ TEST(UncaughtThrow) {
// | JS |
// | C-to-JS |
TEST(C2JSFrames) {
- CcTest::InitializeVM(PRINT_EXTENSION | GC_EXTENSION);
v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context =
+ CcTest::NewContext(PRINT_EXTENSION | GC_EXTENSION);
+ v8::Context::Scope context_scope(context);
const char* source = "function foo(a) { gc(), print(a); }";
@@ -312,12 +315,12 @@ TEST(C2JSFrames) {
// source resulted in crash.
TEST(Regression236) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
Handle<Script> script = factory->NewScript(factory->empty_string());
- script->set_source(HEAP->undefined_value());
+ script->set_source(CcTest::heap()->undefined_value());
CHECK_EQ(-1, GetScriptLineNumber(script, 0));
CHECK_EQ(-1, GetScriptLineNumber(script, 100));
CHECK_EQ(-1, GetScriptLineNumber(script, -1));
@@ -325,7 +328,7 @@ TEST(Regression236) {
TEST(GetScriptLineNumber) {
- CcTest::InitializeVM();
+ LocalContext context;
v8::HandleScope scope(CcTest::isolate());
v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
const char function_f[] = "function f() {}";
@@ -342,7 +345,7 @@ TEST(GetScriptLineNumber) {
v8::Handle<v8::String> script_body = v8::String::New(buffer.start());
v8::Script::Compile(script_body, &origin)->Run();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- CcTest::env()->Global()->Get(v8::String::New("f")));
+ context->Global()->Get(v8::String::New("f")));
CHECK_EQ(i, f->GetScriptLineNumber());
}
}
@@ -374,8 +377,10 @@ TEST(OptimizedCodeSharing) {
*v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure1"))));
Handle<JSFunction> fun2 = v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure2"))));
- CHECK(fun1->IsOptimized() || !fun1->IsOptimizable());
- CHECK(fun2->IsOptimized() || !fun2->IsOptimizable());
+ CHECK(fun1->IsOptimized()
+ || !CcTest::i_isolate()->use_crankshaft() || !fun1->IsOptimizable());
+ CHECK(fun2->IsOptimized()
+ || !CcTest::i_isolate()->use_crankshaft() || !fun2->IsOptimizable());
CHECK_EQ(fun1->code(), fun2->code());
}
}
@@ -420,16 +425,16 @@ static void CheckCodeForUnsafeLiteral(Handle<JSFunction> f) {
TEST(SplitConstantsInFullCompiler) {
- CcTest::InitializeVM();
+ LocalContext context;
v8::HandleScope scope(CcTest::isolate());
CompileRun("function f() { a = 12345678 }; f();");
- CheckCodeForUnsafeLiteral(GetJSFunction(CcTest::env()->Global(), "f"));
+ CheckCodeForUnsafeLiteral(GetJSFunction(context->Global(), "f"));
CompileRun("function f(x) { a = 12345678 + x}; f(1);");
- CheckCodeForUnsafeLiteral(GetJSFunction(CcTest::env()->Global(), "f"));
+ CheckCodeForUnsafeLiteral(GetJSFunction(context->Global(), "f"));
CompileRun("function f(x) { var arguments = 1; x += 12345678}; f(1);");
- CheckCodeForUnsafeLiteral(GetJSFunction(CcTest::env()->Global(), "f"));
+ CheckCodeForUnsafeLiteral(GetJSFunction(context->Global(), "f"));
CompileRun("function f(x) { var arguments = 1; x = 12345678}; f(1);");
- CheckCodeForUnsafeLiteral(GetJSFunction(CcTest::env()->Global(), "f"));
+ CheckCodeForUnsafeLiteral(GetJSFunction(context->Global(), "f"));
}
#endif
diff --git a/deps/v8/test/cctest/test-constantpool.cc b/deps/v8/test/cctest/test-constantpool.cc
new file mode 100644
index 0000000000..9f2436c034
--- /dev/null
+++ b/deps/v8/test/cctest/test-constantpool.cc
@@ -0,0 +1,50 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+
+// Test constant pool array code.
+
+#include "v8.h"
+
+#include "factory.h"
+#include "objects.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+
+TEST(ConstantPool) {
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ v8::HandleScope scope(context->GetIsolate());
+
+ // Check construction.
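+  // Entries are grouped by type: int64 entries first, then pointers, then int32.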
+ Handle<ConstantPoolArray> array = factory->NewConstantPoolArray(3, 2, 1);
+ CHECK_EQ(array->count_of_int64_entries(), 3);
+ CHECK_EQ(array->count_of_ptr_entries(), 2);
+ CHECK_EQ(array->count_of_int32_entries(), 1);
+ CHECK_EQ(array->length(), 6);
+ CHECK_EQ(array->first_int64_index(), 0);
+ CHECK_EQ(array->first_ptr_index(), 3);
+ CHECK_EQ(array->first_int32_index(), 5);
+
+ // Check getters and setters.
+ int64_t big_number = V8_2PART_UINT64_C(0x12345678, 9ABCDEF0);
+ Handle<Object> object = factory->NewHeapNumber(4.0);
+ array->set(0, big_number);
+ array->set(1, 0.5);
+ array->set(3, *object);
+ array->set(5, 50);
+ CHECK_EQ(array->get_int64_entry(0), big_number);
+ CHECK_EQ(array->get_int64_entry_as_double(1), 0.5);
+ CHECK_EQ(array->get_ptr_entry(3), *object);
+ CHECK_EQ(array->get_int32_entry(5), 50);
+
+ // Check pointers are updated on GC.
+ Object* old_ptr = array->get_ptr_entry(3);
+ CHECK_EQ(*object, old_ptr);
+ heap->CollectGarbage(NEW_SPACE);
+ Object* new_ptr = array->get_ptr_entry(3);
+ CHECK_NE(*object, old_ptr);
+ CHECK_EQ(*object, new_ptr);
+}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 4708f506d2..9ef307c6f4 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -127,7 +127,7 @@ i::Code* CreateCode(LocalContext* env) {
TEST(CodeEvents) {
CcTest::InitializeVM();
LocalContext env;
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
TestSetup test_setup;
@@ -196,7 +196,7 @@ static int CompareProfileNodes(const T* p1, const T* p2) {
TEST(TickEvents) {
TestSetup test_setup;
LocalContext env;
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
i::Code* frame1_code = CreateCode(&env);
@@ -254,7 +254,7 @@ TEST(TickEvents) {
TEST(CrashIfStoppingLastNonExistentProfile) {
CcTest::InitializeVM();
TestSetup test_setup;
- CpuProfiler* profiler = i::Isolate::Current()->cpu_profiler();
+ CpuProfiler* profiler = CcTest::i_isolate()->cpu_profiler();
profiler->StartProfiling("1");
profiler->StopProfiling("2");
profiler->StartProfiling("1");
@@ -267,7 +267,7 @@ TEST(CrashIfStoppingLastNonExistentProfile) {
TEST(Issue1398) {
TestSetup test_setup;
LocalContext env;
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
i::Code* code = CreateCode(&env);
@@ -309,7 +309,7 @@ TEST(Issue1398) {
TEST(DeleteAllCpuProfiles) {
CcTest::InitializeVM();
TestSetup test_setup;
- CpuProfiler* profiler = i::Isolate::Current()->cpu_profiler();
+ CpuProfiler* profiler = CcTest::i_isolate()->cpu_profiler();
CHECK_EQ(0, profiler->GetProfilesCount());
profiler->DeleteAllProfiles();
CHECK_EQ(0, profiler->GetProfilesCount());
@@ -396,26 +396,6 @@ TEST(DeleteCpuProfile) {
}
-TEST(GetProfilerWhenIsolateIsNotInitialized) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- CHECK(i::Isolate::Current()->IsDefaultIsolate());
- CHECK(!i::Isolate::Current()->IsInitialized());
- CHECK_EQ(NULL, isolate->GetCpuProfiler());
- {
- v8::Isolate::Scope isolateScope(isolate);
- LocalContext env;
- v8::HandleScope scope(isolate);
- CHECK_NE(NULL, isolate->GetCpuProfiler());
- isolate->GetCpuProfiler()->StartCpuProfiling(v8::String::New("Test"));
- isolate->GetCpuProfiler()->StopCpuProfiling(v8::String::New("Test"));
- }
- CHECK(i::Isolate::Current()->IsInitialized());
- CHECK_NE(NULL, isolate->GetCpuProfiler());
- isolate->Dispose();
- CHECK_EQ(NULL, isolate->GetCpuProfiler());
-}
-
-
TEST(ProfileStartEndTime) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -495,7 +475,12 @@ static const v8::CpuProfileNode* FindChild(const v8::CpuProfileNode* node,
static const v8::CpuProfileNode* GetChild(const v8::CpuProfileNode* node,
const char* name) {
const v8::CpuProfileNode* result = FindChild(node, name);
- CHECK(result);
+ if (!result) {
+ char buffer[100];
+ i::OS::SNPrintF(Vector<char>(buffer, ARRAY_SIZE(buffer)),
+ "Failed to GetChild: %s", name);
+ FATAL(buffer);
+ }
return result;
}
@@ -610,7 +595,7 @@ static const char* cpu_profiler_test_source2 = "function loop() {}\n"
" } while (++k < count*100*1000);\n"
"}\n";
-// Check that the profile tree doesn't contain unexpecte traces:
+// Check that the profile tree doesn't contain unexpected traces:
// - 'loop' can be called only by 'delay'
// - 'delay' may be called only by 'start'
// The profile will look like the following:
@@ -973,7 +958,7 @@ TEST(FunctionCallSample) {
v8::HandleScope scope(env->GetIsolate());
   // Collect garbage that might have been generated while installing extensions.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
v8::Script::Compile(v8::String::New(call_function_test_source))->Run();
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
@@ -1101,7 +1086,13 @@ TEST(FunctionApplySample) {
}
-static const char* js_native_js_test_source = "function foo(iterations) {\n"
+static const char* js_native_js_test_source =
+"var is_profiling = false;\n"
+"function foo(iterations) {\n"
+" if (!is_profiling) {\n"
+" is_profiling = true;\n"
+" startProfiling('my_profile');\n"
+" }\n"
" var r = 0;\n"
" for (var i = 0; i < iterations; i++) { r += i; }\n"
" return r;\n"
@@ -1133,7 +1124,9 @@ static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
// 55 1 bar #16 5
// 54 54 foo #16 6
TEST(JsNativeJsSample) {
- LocalContext env;
+ const char* extensions[] = { "v8/profiler" };
+ v8::ExtensionConfiguration config(1, extensions);
+ LocalContext env(&config);
v8::HandleScope scope(env->GetIsolate());
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
@@ -1149,7 +1142,7 @@ TEST(JsNativeJsSample) {
int32_t duration_ms = 20;
v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 50);
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 10);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
@@ -1177,7 +1170,12 @@ TEST(JsNativeJsSample) {
static const char* js_native_js_runtime_js_test_source =
+"var is_profiling = false;\n"
"function foo(iterations) {\n"
+" if (!is_profiling) {\n"
+" is_profiling = true;\n"
+" startProfiling('my_profile');\n"
+" }\n"
" var r = 0;\n"
" for (var i = 0; i < iterations; i++) { r += i; }\n"
" return r;\n"
@@ -1204,7 +1202,9 @@ static const char* js_native_js_runtime_js_test_source =
// 51 51 foo #16 6
// 2 2 (program) #0 2
TEST(JsNativeJsRuntimeJsSample) {
- LocalContext env;
+ const char* extensions[] = { "v8/profiler" };
+ v8::ExtensionConfiguration config(1, extensions);
+ LocalContext env(&config);
v8::HandleScope scope(env->GetIsolate());
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
@@ -1221,7 +1221,7 @@ TEST(JsNativeJsRuntimeJsSample) {
int32_t duration_ms = 20;
v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 50);
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 10);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
@@ -1252,7 +1252,12 @@ static void CallJsFunction2(const v8::FunctionCallbackInfo<v8::Value>& info) {
static const char* js_native1_js_native2_js_test_source =
+"var is_profiling = false;\n"
"function foo(iterations) {\n"
+" if (!is_profiling) {\n"
+" is_profiling = true;\n"
+" startProfiling('my_profile');\n"
+" }\n"
" var r = 0;\n"
" for (var i = 0; i < iterations; i++) { r += i; }\n"
" return r;\n"
@@ -1279,7 +1284,9 @@ static const char* js_native1_js_native2_js_test_source =
// 54 54 foo #16 7
// 2 2 (program) #0 2
TEST(JsNative1JsNative2JsSample) {
- LocalContext env;
+ const char* extensions[] = { "v8/profiler" };
+ v8::ExtensionConfiguration config(1, extensions);
+ LocalContext env(&config);
v8::HandleScope scope(env->GetIsolate());
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
@@ -1301,7 +1308,7 @@ TEST(JsNative1JsNative2JsSample) {
int32_t duration_ms = 20;
v8::Handle<v8::Value> args[] = { v8::Integer::New(duration_ms) };
const v8::CpuProfile* profile =
- RunProfiler(env, function, args, ARRAY_SIZE(args), 50);
+ RunProfiler(env, function, args, ARRAY_SIZE(args), 10);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
ScopedVector<v8::Handle<v8::String> > names(3);
@@ -1341,7 +1348,7 @@ TEST(IdleTime) {
v8::Local<v8::String> profile_name = v8::String::New("my_profile");
cpu_profiler->StartCpuProfiling(profile_name);
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
i::ProfilerEventsProcessor* processor = isolate->cpu_profiler()->processor();
processor->AddCurrentStack(isolate);
@@ -1380,3 +1387,56 @@ TEST(IdleTime) {
cpu_profiler->DeleteAllCpuProfiles();
}
+
+
+static void CheckFunctionDetails(const v8::CpuProfileNode* node,
+ const char* name, const char* script_name, int script_id,
+ int line, int column) {
+ CHECK_EQ(v8::String::New(name), node->GetFunctionName());
+ CHECK_EQ(v8::String::New(script_name), node->GetScriptResourceName());
+ CHECK_EQ(script_id, node->GetScriptId());
+ CHECK_EQ(line, node->GetLineNumber());
+ CHECK_EQ(column, node->GetColumnNumber());
+}
+
+
+TEST(FunctionDetails) {
+ const char* extensions[] = { "v8/profiler" };
+ v8::ExtensionConfiguration config(1, extensions);
+ LocalContext env(&config);
+ v8::HandleScope handleScope(env->GetIsolate());
+
+ v8::CpuProfiler* profiler = env->GetIsolate()->GetCpuProfiler();
+ CHECK_EQ(0, profiler->GetProfileCount());
+ v8::Handle<v8::Script> script_a = v8::Script::Compile(v8::String::New(
+ " function foo\n() { try { bar(); } catch(e) {} }\n"
+ " function bar() { startProfiling(); }\n"), v8::String::New("script_a"));
+ script_a->Run();
+ v8::Handle<v8::Script> script_b = v8::Script::Compile(v8::String::New(
+ "\n\n function baz() { try { foo(); } catch(e) {} }\n"
+ "\n\nbaz();\n"
+ "stopProfiling();\n"), v8::String::New("script_b"));
+ script_b->Run();
+ CHECK_EQ(1, profiler->GetProfileCount());
+ const v8::CpuProfile* profile = profiler->GetCpuProfile(0);
+ const v8::CpuProfileNode* current = profile->GetTopDownRoot();
+ reinterpret_cast<ProfileNode*>(
+ const_cast<v8::CpuProfileNode*>(current))->Print(0);
+ // The tree should look like this:
+ // 0 (root) 0 #1
+ // 0 (anonymous function) 19 #2 no reason script_b:1
+ // 0 baz 19 #3 TryCatchStatement script_b:3
+ // 0 foo 18 #4 TryCatchStatement script_a:2
+ // 1 bar 18 #5 no reason script_a:3
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* script = GetChild(root,
+ ProfileGenerator::kAnonymousFunctionName);
+ CheckFunctionDetails(script, ProfileGenerator::kAnonymousFunctionName,
+ "script_b", script_b->GetId(), 1, 1);
+ const v8::CpuProfileNode* baz = GetChild(script, "baz");
+ CheckFunctionDetails(baz, "baz", "script_b", script_b->GetId(), 3, 16);
+ const v8::CpuProfileNode* foo = GetChild(baz, "foo");
+ CheckFunctionDetails(foo, "foo", "script_a", script_a->GetId(), 2, 1);
+ const v8::CpuProfileNode* bar = GetChild(foo, "bar");
+ CheckFunctionDetails(bar, "bar", "script_a", script_a->GetId(), 3, 14);
+}
diff --git a/deps/v8/test/cctest/test-dataflow.cc b/deps/v8/test/cctest/test-dataflow.cc
index f3f0308236..532c9207b6 100644
--- a/deps/v8/test/cctest/test-dataflow.cc
+++ b/deps/v8/test/cctest/test-dataflow.cc
@@ -36,7 +36,7 @@ using namespace v8::internal;
TEST(BitVector) {
v8::internal::V8::Initialize(NULL);
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
{
BitVector v(15, &zone);
v.Add(1);
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index 6336481dcc..460c07e5aa 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -109,7 +109,7 @@ static int64_t TimeFromYearMonthDay(DateCache* date_cache,
static void CheckDST(int64_t time) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
DateCache* date_cache = isolate->date_cache();
int64_t actual = date_cache->ToLocal(time);
int64_t expected = time + date_cache->GetLocalOffsetFromOS() +
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 2540a3dfe5..1bd1dc3a0d 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -29,7 +29,6 @@
#include <stdlib.h>
-#define V8_DISABLE_DEPRECATIONS 1
#include "v8.h"
#include "api.h"
@@ -43,7 +42,6 @@
#include "platform/socket.h"
#include "stub-cache.h"
#include "utils.h"
-#undef V8_DISABLE_DEPRECATIONS
using ::v8::internal::Mutex;
@@ -142,9 +140,9 @@ class DebugLocalContext {
v8::Handle<v8::ObjectTemplate> global_template =
v8::Handle<v8::ObjectTemplate>(),
v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
- : scope_(v8::Isolate::GetCurrent()),
+ : scope_(CcTest::isolate()),
context_(
- v8::Context::New(v8::Isolate::GetCurrent(),
+ v8::Context::New(CcTest::isolate(),
extensions,
global_template,
global_object)) {
@@ -200,8 +198,10 @@ static v8::Local<v8::Function> CompileFunction(DebugLocalContext* env,
static v8::Local<v8::Function> CompileFunction(const char* source,
const char* function_name) {
v8::Script::Compile(v8::String::New(source))->Run();
+ v8::Local<v8::Object> global =
+ CcTest::isolate()->GetCurrentContext()->Global();
return v8::Local<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8::String::New(function_name)));
+ global->Get(v8::String::New(function_name)));
}
@@ -302,7 +302,7 @@ static int SetScriptBreakPointByNameFromJS(const char* script_name,
// Clear a break point.
static void ClearBreakPoint(int break_point) {
- v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
+ v8::internal::Isolate* isolate = CcTest::i_isolate();
v8::internal::Debug* debug = isolate->debug();
debug->ClearBreakPoint(
Handle<Object>(v8::internal::Smi::FromInt(break_point), isolate));
@@ -364,7 +364,7 @@ static void ChangeScriptBreakPointIgnoreCountFromJS(int break_point_number,
// Change break on exception.
static void ChangeBreakOnException(bool caught, bool uncaught) {
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
debug->ChangeBreakOnException(v8::internal::BreakException, caught);
debug->ChangeBreakOnException(v8::internal::BreakUncaughtException, uncaught);
}
@@ -391,7 +391,7 @@ static void ChangeBreakOnExceptionFromJS(bool caught, bool uncaught) {
// Prepare to step to next break location.
static void PrepareStep(StepAction step_action) {
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
debug->PrepareStep(step_action, 1, StackFrame::NO_ID);
}
@@ -403,7 +403,7 @@ namespace internal {
// Collect the currently debugged functions.
Handle<FixedArray> GetDebuggedFunctions() {
- Debug* debug = Isolate::Current()->debug();
+ Debug* debug = CcTest::i_isolate()->debug();
v8::internal::DebugInfoListNode* node = debug->debug_info_list_;
@@ -416,7 +416,7 @@ Handle<FixedArray> GetDebuggedFunctions() {
// Allocate array for the debugged functions
Handle<FixedArray> debugged_functions =
- Isolate::Current()->factory()->NewFixedArray(count);
+ CcTest::i_isolate()->factory()->NewFixedArray(count);
// Run through the debug info objects and collect all functions.
count = 0;
@@ -430,7 +430,7 @@ Handle<FixedArray> GetDebuggedFunctions() {
static Handle<Code> ComputeCallDebugBreak(int argc) {
- return Isolate::Current()->stub_cache()->ComputeCallDebugBreak(argc,
+ return CcTest::i_isolate()->stub_cache()->ComputeCallDebugBreak(argc,
Code::CALL_IC);
}
@@ -439,15 +439,15 @@ static Handle<Code> ComputeCallDebugBreak(int argc) {
void CheckDebuggerUnloaded(bool check_functions) {
// Check that the debugger context is cleared and that there is no debug
// information stored for the debugger.
- CHECK(Isolate::Current()->debug()->debug_context().is_null());
- CHECK_EQ(NULL, Isolate::Current()->debug()->debug_info_list_);
+ CHECK(CcTest::i_isolate()->debug()->debug_context().is_null());
+ CHECK_EQ(NULL, CcTest::i_isolate()->debug()->debug_info_list_);
// Collect garbage to ensure weak handles are cleared.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
   // Iterate the heap and check that there are no debugger related objects left.
- HeapIterator iterator(HEAP);
+ HeapIterator iterator(CcTest::heap());
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
CHECK(!obj->IsDebugInfo());
CHECK(!obj->IsBreakPointInfo());
@@ -472,8 +472,8 @@ void CheckDebuggerUnloaded(bool check_functions) {
void ForceUnloadDebugger() {
- Isolate::Current()->debugger()->never_unload_debugger_ = false;
- Isolate::Current()->debugger()->UnloadDebugger();
+ CcTest::i_isolate()->debugger()->never_unload_debugger_ = false;
+ CcTest::i_isolate()->debugger()->UnloadDebugger();
}
@@ -508,7 +508,7 @@ void CheckDebugBreakFunction(DebugLocalContext* env,
const char* source, const char* name,
int position, v8::internal::RelocInfo::Mode mode,
Code* debug_break) {
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// Create function and set the break point.
Handle<v8::internal::JSFunction> fun = v8::Utils::OpenHandle(
@@ -674,7 +674,7 @@ static void DebugEventBreakPointHitCount(
v8::DebugEvent event = event_details.GetEvent();
v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
v8::Handle<v8::Object> event_data = event_details.GetEventData();
- v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
+ v8::internal::Isolate* isolate = CcTest::i_isolate();
Debug* debug = isolate->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -787,7 +787,7 @@ static void DebugEventCounter(
v8::DebugEvent event = event_details.GetEvent();
v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
v8::Handle<v8::Object> event_data = event_details.GetEventData();
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -849,7 +849,7 @@ static void DebugEventEvaluate(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -876,7 +876,7 @@ static void DebugEventRemoveBreakPoint(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
v8::Handle<v8::Value> data = event_details.GetCallbackData();
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -894,7 +894,7 @@ StepAction step_action = StepIn; // Step action to perform when stepping.
static void DebugEventStep(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -921,7 +921,7 @@ static void DebugEventStepSequence(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -950,7 +950,7 @@ static void DebugEventStepSequence(
static void DebugEventBreakPointCollectGarbage(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -961,10 +961,10 @@ static void DebugEventBreakPointCollectGarbage(
break_point_hit_count++;
if (break_point_hit_count % 2 == 0) {
// Scavenge.
- HEAP->CollectGarbage(v8::internal::NEW_SPACE);
+ CcTest::heap()->CollectGarbage(v8::internal::NEW_SPACE);
} else {
// Mark sweep compact.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
}
}
}
@@ -975,7 +975,7 @@ static void DebugEventBreakPointCollectGarbage(
static void DebugEventBreak(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -985,10 +985,10 @@ static void DebugEventBreak(
// Run the garbage collector to enforce heap verification if option
// --verify-heap is set.
- HEAP->CollectGarbage(v8::internal::NEW_SPACE);
+ CcTest::heap()->CollectGarbage(v8::internal::NEW_SPACE);
// Set the break flag again to come back here as soon as possible.
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(CcTest::isolate());
}
}
@@ -1001,7 +1001,8 @@ static void DebugEventBreakMax(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
- v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
+ v8::Isolate* v8_isolate = CcTest::isolate();
+ v8::internal::Isolate* isolate = CcTest::i_isolate();
v8::internal::Debug* debug = isolate->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -1023,11 +1024,11 @@ static void DebugEventBreakMax(
}
// Set the break flag again to come back here as soon as possible.
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(v8_isolate);
} else if (terminate_after_max_break_point_hit) {
// Terminate execution after the last break if requested.
- v8::V8::TerminateExecution();
+ v8::V8::TerminateExecution(v8_isolate);
}
// Perform a full deoptimization when the specified number of
@@ -1075,13 +1076,13 @@ TEST(DebugStub) {
"function f2(){x=1;}", "f2",
0,
v8::internal::RelocInfo::CODE_TARGET_CONTEXT,
- Isolate::Current()->builtins()->builtin(
+ CcTest::i_isolate()->builtins()->builtin(
Builtins::kStoreIC_DebugBreak));
CheckDebugBreakFunction(&env,
"function f3(){var a=x;}", "f3",
0,
v8::internal::RelocInfo::CODE_TARGET_CONTEXT,
- Isolate::Current()->builtins()->builtin(
+ CcTest::i_isolate()->builtins()->builtin(
Builtins::kLoadIC_DebugBreak));
// TODO(1240753): Make the test architecture independent or split
@@ -1095,7 +1096,7 @@ TEST(DebugStub) {
"f4",
0,
v8::internal::RelocInfo::CODE_TARGET,
- Isolate::Current()->builtins()->builtin(
+ CcTest::i_isolate()->builtins()->builtin(
Builtins::kKeyedStoreIC_DebugBreak));
CheckDebugBreakFunction(
&env,
@@ -1103,7 +1104,7 @@ TEST(DebugStub) {
"f5",
0,
v8::internal::RelocInfo::CODE_TARGET,
- Isolate::Current()->builtins()->builtin(
+ CcTest::i_isolate()->builtins()->builtin(
Builtins::kKeyedLoadIC_DebugBreak));
#endif
@@ -1113,7 +1114,7 @@ TEST(DebugStub) {
"f6",
0,
v8::internal::RelocInfo::CODE_TARGET,
- Isolate::Current()->builtins()->builtin(
+ CcTest::i_isolate()->builtins()->builtin(
Builtins::kCompareNilIC_DebugBreak));
// Check the debug break code stubs for call ICs with different number of
@@ -1449,12 +1450,12 @@ static void CallAndGC(v8::Local<v8::Object> recv,
CHECK_EQ(1 + i * 3, break_point_hit_count);
// Scavenge and call function.
- HEAP->CollectGarbage(v8::internal::NEW_SPACE);
+ CcTest::heap()->CollectGarbage(v8::internal::NEW_SPACE);
f->Call(recv, 0, NULL);
CHECK_EQ(2 + i * 3, break_point_hit_count);
// Mark sweep (and perhaps compact) and call function.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
f->Call(recv, 0, NULL);
CHECK_EQ(3 + i * 3, break_point_hit_count);
}
@@ -2258,7 +2259,7 @@ TEST(ScriptBreakPointLineTopLevel) {
}
f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
SetScriptBreakPointByNameFromJS("test.html", 3, -1);
@@ -2397,7 +2398,8 @@ TEST(DebuggerStatementBreakpoint) {
// the correct results.
TEST(DebugEvaluate) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
env.ExposeDebug();
// Create a function for checking the evaluation when hitting a break point.
@@ -2410,13 +2412,13 @@ TEST(DebugEvaluate) {
   // Different expected values of x and a when in a break point (u = undefined,
// d = Hello, world!).
struct EvaluateCheck checks_uu[] = {
- {"x", v8::Undefined()},
- {"a", v8::Undefined()},
+ {"x", v8::Undefined(isolate)},
+ {"a", v8::Undefined(isolate)},
{NULL, v8::Handle<v8::Value>()}
};
struct EvaluateCheck checks_hu[] = {
{"x", v8::String::New("Hello, world!")},
- {"a", v8::Undefined()},
+ {"a", v8::Undefined(isolate)},
{NULL, v8::Handle<v8::Value>()}
};
struct EvaluateCheck checks_hh[] = {
@@ -2482,7 +2484,7 @@ TEST(DebugEvaluate) {
// parameter.
checks = checks_uu;
v8::Handle<v8::Value> argv_bar_1[2] = {
- v8::Undefined(),
+ v8::Undefined(isolate),
v8::Number::New(barbar_break_position)
};
bar->Call(env->Global(), 2, argv_bar_1);
@@ -2551,7 +2553,7 @@ v8::Handle<v8::Function> checkFrameEvalFunction;
static void CheckDebugEval(const v8::Debug::EventDetails& eventDetails) {
if (eventDetails.GetEvent() == v8::Break) {
++debugEventCount;
- v8::HandleScope handleScope(v8::Isolate::GetCurrent());
+ v8::HandleScope handleScope(CcTest::isolate());
v8::Handle<v8::Value> args[] = { eventDetails.GetExecutionState() };
CHECK(checkGlobalEvalFunction->Call(
@@ -2726,7 +2728,8 @@ TEST(DebugEvaluateWithoutStack) {
" \"expression\":\"v1\",\"disable_break\":true"
"}}";
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_111, buffer));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_111, buffer));
const char* command_112 = "{\"seq\":112,"
"\"type\":\"request\","
@@ -2736,7 +2739,7 @@ TEST(DebugEvaluateWithoutStack) {
" \"expression\":\"getAnimal()\",\"disable_break\":true"
"}}";
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_112, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_112, buffer));
const char* command_113 = "{\"seq\":113,"
"\"type\":\"request\","
@@ -2746,7 +2749,7 @@ TEST(DebugEvaluateWithoutStack) {
" \"expression\":\"239 + 566\",\"disable_break\":true"
"}}";
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_113, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_113, buffer));
v8::Debug::ProcessDebugMessages();
@@ -2847,7 +2850,7 @@ TEST(DebugStepKeyedLoadLoop) {
foo->Call(env->Global(), kArgc, args);
// With stepping all break locations are hit.
- CHECK_EQ(34, break_point_hit_count);
+ CHECK_EQ(35, break_point_hit_count);
v8::Debug::SetDebugEventListener2(NULL);
CheckDebuggerUnloaded();
@@ -2894,7 +2897,7 @@ TEST(DebugStepKeyedStoreLoop) {
foo->Call(env->Global(), kArgc, args);
// With stepping all break locations are hit.
- CHECK_EQ(33, break_point_hit_count);
+ CHECK_EQ(34, break_point_hit_count);
v8::Debug::SetDebugEventListener2(NULL);
CheckDebuggerUnloaded();
@@ -2938,7 +2941,7 @@ TEST(DebugStepNamedLoadLoop) {
foo->Call(env->Global(), 0, NULL);
// With stepping all break locations are hit.
- CHECK_EQ(54, break_point_hit_count);
+ CHECK_EQ(55, break_point_hit_count);
v8::Debug::SetDebugEventListener2(NULL);
CheckDebuggerUnloaded();
@@ -2982,7 +2985,7 @@ static void DoDebugStepNamedStoreLoop(int expected) {
 // Test of the stepping mechanism for named store in a loop.
TEST(DebugStepNamedStoreLoop) {
- DoDebugStepNamedStoreLoop(23);
+ DoDebugStepNamedStoreLoop(24);
}
@@ -3101,7 +3104,8 @@ TEST(DebugStepLocals) {
TEST(DebugStepIf) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
v8::Debug::SetDebugEventListener2(DebugEventStep);
@@ -3125,14 +3129,14 @@ TEST(DebugStepIf) {
// Stepping through the true part.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_true[argc] = { v8::True() };
+ v8::Handle<v8::Value> argv_true[argc] = { v8::True(isolate) };
foo->Call(env->Global(), argc, argv_true);
CHECK_EQ(4, break_point_hit_count);
// Stepping through the false part.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_false[argc] = { v8::False() };
+ v8::Handle<v8::Value> argv_false[argc] = { v8::False(isolate) };
foo->Call(env->Global(), argc, argv_false);
CHECK_EQ(5, break_point_hit_count);
@@ -3354,7 +3358,7 @@ TEST(DebugStepForContinue) {
v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
result = foo->Call(env->Global(), argc, argv_10);
CHECK_EQ(5, result->Int32Value());
- CHECK_EQ(51, break_point_hit_count);
+ CHECK_EQ(52, break_point_hit_count);
// Looping 100 times.
step_action = StepIn;
@@ -3362,7 +3366,7 @@ TEST(DebugStepForContinue) {
v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
result = foo->Call(env->Global(), argc, argv_100);
CHECK_EQ(50, result->Int32Value());
- CHECK_EQ(456, break_point_hit_count);
+ CHECK_EQ(457, break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener2(NULL);
@@ -3406,7 +3410,7 @@ TEST(DebugStepForBreak) {
v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
result = foo->Call(env->Global(), argc, argv_10);
CHECK_EQ(9, result->Int32Value());
- CHECK_EQ(54, break_point_hit_count);
+ CHECK_EQ(55, break_point_hit_count);
// Looping 100 times.
step_action = StepIn;
@@ -3414,7 +3418,7 @@ TEST(DebugStepForBreak) {
v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
result = foo->Call(env->Global(), argc, argv_100);
CHECK_EQ(99, result->Int32Value());
- CHECK_EQ(504, break_point_hit_count);
+ CHECK_EQ(505, break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener2(NULL);
@@ -3503,7 +3507,8 @@ TEST(DebugStepWith) {
TEST(DebugConditional) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
v8::Debug::SetDebugEventListener2(DebugEventStep);
@@ -3527,7 +3532,7 @@ TEST(DebugConditional) {
step_action = StepIn;
break_point_hit_count = 0;
const int argc = 1;
- v8::Handle<v8::Value> argv_true[argc] = { v8::True() };
+ v8::Handle<v8::Value> argv_true[argc] = { v8::True(isolate) };
foo->Call(env->Global(), argc, argv_true);
CHECK_EQ(5, break_point_hit_count);
@@ -3755,7 +3760,8 @@ TEST(DebugStepFunctionApply) {
// Test that step in works with function.call.
TEST(DebugStepFunctionCall) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Create a function for testing stepping.
v8::Local<v8::Function> foo = CompileFunction(
@@ -3782,7 +3788,7 @@ TEST(DebugStepFunctionCall) {
// Check stepping where the if condition in bar is true.
break_point_hit_count = 0;
const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = { v8::True() };
+ v8::Handle<v8::Value> argv[argc] = { v8::True(isolate) };
foo->Call(env->Global(), argc, argv);
CHECK_EQ(8, break_point_hit_count);
@@ -3845,7 +3851,7 @@ TEST(BreakOnException) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- v8::internal::Isolate::Current()->TraceException(false);
+ CcTest::i_isolate()->TraceException(false);
// Create functions for testing break on exception.
CompileFunction(&env, "function throws(){throw 1;}", "throws");
@@ -3991,7 +3997,7 @@ TEST(BreakOnCompileException) {
// For this test, we want to break on uncaught exceptions:
ChangeBreakOnException(false, true);
- v8::internal::Isolate::Current()->TraceException(false);
+ CcTest::i_isolate()->TraceException(false);
// Create a function for checking the function when hitting a break point.
frame_count = CompileFunction(&env, frame_count_source, "frame_count");
@@ -4162,7 +4168,7 @@ TEST(DebugBreak) {
f3->Call(env->Global(), 0, NULL);
// Set the debug break flag.
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(env->GetIsolate());
// Call all functions with different argument count.
break_point_hit_count = 0;
@@ -4196,7 +4202,7 @@ TEST(DisableBreak) {
v8::Local<v8::Function> f = CompileFunction(&env, src, "f");
// Set the debug break flag.
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(env->GetIsolate());
// Call all functions with different argument count.
break_point_hit_count = 0;
@@ -4204,7 +4210,7 @@ TEST(DisableBreak) {
CHECK_EQ(1, break_point_hit_count);
{
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(env->GetIsolate());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
v8::internal::DisableBreak disable_break(isolate, true);
f->Call(env->Global(), 0, NULL);
@@ -4227,14 +4233,14 @@ static const char* kSimpleExtensionSource =
// http://crbug.com/28933
// Test that debug break is disabled when bootstrapper is active.
TEST(NoBreakWhenBootstrapping) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
// Register a debug event listener which sets the break flag and counts.
v8::Debug::SetDebugEventListener2(DebugEventCounter);
// Set the debug break flag.
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(isolate);
break_point_hit_count = 0;
{
// Create a context with an extension to make sure that some JavaScript
@@ -4889,11 +4895,12 @@ void MessageQueueDebuggerThread::Run() {
// until the execution of source_2.
// Note: AsciiToUtf16 executes before SendCommand, so command is copied
// to buffer before buffer is sent to SendCommand.
- v8::Debug::SendCommand(buffer_1, AsciiToUtf16(command_1, buffer_1));
- v8::Debug::SendCommand(buffer_2, AsciiToUtf16(command_2, buffer_2));
- v8::Debug::SendCommand(buffer_2, AsciiToUtf16(command_3, buffer_2));
- v8::Debug::SendCommand(buffer_2, AsciiToUtf16(command_3, buffer_2));
- v8::Debug::SendCommand(buffer_2, AsciiToUtf16(command_3, buffer_2));
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Debug::SendCommand(isolate, buffer_1, AsciiToUtf16(command_1, buffer_1));
+ v8::Debug::SendCommand(isolate, buffer_2, AsciiToUtf16(command_2, buffer_2));
+ v8::Debug::SendCommand(isolate, buffer_2, AsciiToUtf16(command_3, buffer_2));
+ v8::Debug::SendCommand(isolate, buffer_2, AsciiToUtf16(command_3, buffer_2));
+ v8::Debug::SendCommand(isolate, buffer_2, AsciiToUtf16(command_3, buffer_2));
message_queue_barriers.barrier_2.Wait();
// Main thread compiles and runs source_2.
// Queued commands are executed at the start of compilation of source_2(
@@ -4913,18 +4920,20 @@ void MessageQueueDebuggerThread::Run() {
// Wait on break event from hitting "debugger" statement
message_queue_barriers.semaphore_2.Wait();
// These should execute after the "debugger" statement in source_2
- v8::Debug::SendCommand(buffer_1, AsciiToUtf16(command_1, buffer_1));
- v8::Debug::SendCommand(buffer_2, AsciiToUtf16(command_2, buffer_2));
- v8::Debug::SendCommand(buffer_2, AsciiToUtf16(command_3, buffer_2));
- v8::Debug::SendCommand(buffer_2, AsciiToUtf16(command_single_step, buffer_2));
+ v8::Debug::SendCommand(isolate, buffer_1, AsciiToUtf16(command_1, buffer_1));
+ v8::Debug::SendCommand(isolate, buffer_2, AsciiToUtf16(command_2, buffer_2));
+ v8::Debug::SendCommand(isolate, buffer_2, AsciiToUtf16(command_3, buffer_2));
+ v8::Debug::SendCommand(
+ isolate, buffer_2, AsciiToUtf16(command_single_step, buffer_2));
// Run after 2 break events, 4 responses.
for (int i = 0; i < 6 ; ++i) {
message_queue_barriers.semaphore_1.Signal();
}
// Wait on break event after a single step executes.
message_queue_barriers.semaphore_2.Wait();
- v8::Debug::SendCommand(buffer_1, AsciiToUtf16(command_2, buffer_1));
- v8::Debug::SendCommand(buffer_2, AsciiToUtf16(command_continue, buffer_2));
+ v8::Debug::SendCommand(isolate, buffer_1, AsciiToUtf16(command_2, buffer_1));
+ v8::Debug::SendCommand(
+ isolate, buffer_2, AsciiToUtf16(command_continue, buffer_2));
// Run after 2 responses.
for (int i = 0; i < 2 ; ++i) {
message_queue_barriers.semaphore_1.Signal();
@@ -5030,7 +5039,8 @@ static void MessageHandlerCountingClientData(
TEST(SendClientDataToHandler) {
// Create a V8 environment
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
TestClientData::ResetCounters();
handled_client_data_instances_count = 0;
v8::Debug::SetMessageHandler2(MessageHandlerCountingClientData);
@@ -5052,16 +5062,18 @@ TEST(SendClientDataToHandler) {
"\"type\":\"request\","
"\"command\":\"continue\"}";
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_1, buffer),
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_1, buffer),
new TestClientData());
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_2, buffer), NULL);
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_2, buffer),
+ v8::Debug::SendCommand(
+ isolate, buffer, AsciiToUtf16(command_2, buffer), NULL);
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_2, buffer),
new TestClientData());
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_2, buffer),
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_2, buffer),
new TestClientData());
// All the messages will be processed on the beforeCompile event.
CompileRun(source_1);
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_continue, buffer));
+ v8::Debug::SendCommand(
+ isolate, buffer, AsciiToUtf16(command_continue, buffer));
CHECK_EQ(3, TestClientData::constructor_call_counter);
CHECK_EQ(TestClientData::constructor_call_counter,
handled_client_data_instances_count);
@@ -5134,14 +5146,14 @@ void V8Thread::Run() {
"\n"
"foo();\n";
- v8::V8::Initialize();
+ v8::Isolate::Scope isolate_scope(CcTest::isolate());
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::Debug::SetMessageHandler2(&ThreadedMessageHandler);
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->Set(v8::String::New("ThreadedAtBarrier1"),
v8::FunctionTemplate::New(ThreadedAtBarrier1));
- v8::Handle<v8::Context> context = v8::Context::New(v8::Isolate::GetCurrent(),
+ v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate(),
NULL,
global_template);
v8::Context::Scope context_scope(context);
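
In these threaded tests, the bare v8::V8::Initialize() call gives way to explicitly entering the shared test isolate, since each worker thread must scope itself to the isolate it debugs before using any handle or debug API. A minimal sketch of that entry pattern, assuming the CcTest fixture; DebugWorkerBody is a name invented here purely for illustration:

    void DebugWorkerBody() {
      // Enter the shared cctest isolate before touching V8 on this thread.
      v8::Isolate* isolate = CcTest::isolate();
      v8::Isolate::Scope isolate_scope(isolate);
      v8::HandleScope scope(isolate);
      // ... per-thread debugging work, as in V8Thread::Run() above ...
    }
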
@@ -5162,11 +5174,12 @@ void DebuggerThread::Run() {
"\"type\":\"request\","
"\"command\":\"continue\"}";
+ v8::Isolate* isolate = CcTest::isolate();
threaded_debugging_barriers.barrier_1.Wait();
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(isolate);
threaded_debugging_barriers.barrier_2.Wait();
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_1, buffer));
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_2, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_1, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_2, buffer));
}
@@ -5249,9 +5262,10 @@ void BreakpointsV8Thread::Run() {
const char* source_2 = "cat(17);\n"
"cat(19);\n";
- v8::V8::Initialize();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Isolate::Scope isolate_scope(isolate);
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(isolate);
v8::Debug::SetMessageHandler2(&BreakpointsMessageHandler);
CompileRun(source_1);
@@ -5323,12 +5337,14 @@ void BreakpointsDebuggerThread::Run() {
"\"command\":\"continue\"}";
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Isolate::Scope isolate_scope(isolate);
// v8 thread initializes, runs source_1
breakpoints_barriers->barrier_1.Wait();
// 1:Set breakpoint in cat() (will get id 1).
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_1, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_1, buffer));
// 2:Set breakpoint in dog() (will get id 2).
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_2, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_2, buffer));
breakpoints_barriers->barrier_2.Wait();
// V8 thread starts compiling source_2.
// Automatic break happens, to run queued commands
@@ -5340,38 +5356,38 @@ void BreakpointsDebuggerThread::Run() {
// Must have hit breakpoint #1.
CHECK_EQ(1, break_event_breakpoint_id);
// 4:Evaluate dog() (which has a breakpoint).
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_3, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_3, buffer));
// V8 thread hits breakpoint in dog().
breakpoints_barriers->semaphore_1.Wait(); // wait for break event
// Must have hit breakpoint #2.
CHECK_EQ(2, break_event_breakpoint_id);
// 5:Evaluate (x + 1).
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_4, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_4, buffer));
// Evaluate (x + 1) finishes.
breakpoints_barriers->semaphore_1.Wait();
// Must have result 108.
CHECK_EQ(108, evaluate_int_result);
// 6:Continue evaluation of dog().
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_5, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_5, buffer));
// Evaluate dog() finishes.
breakpoints_barriers->semaphore_1.Wait();
// Must have result 107.
CHECK_EQ(107, evaluate_int_result);
// 7:Continue evaluation of source_2, finish cat(17), hit breakpoint
// in cat(19).
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_6, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_6, buffer));
// Message callback gets break event.
breakpoints_barriers->semaphore_1.Wait(); // wait for break event
// Must have hit breakpoint #1.
CHECK_EQ(1, break_event_breakpoint_id);
// 8: Evaluate dog() with breaks disabled.
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_7, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_7, buffer));
// Evaluate dog() finishes.
breakpoints_barriers->semaphore_1.Wait();
// Must have result 116.
CHECK_EQ(116, evaluate_int_result);
// 9: Continue evaluation of source2, reach end.
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_8, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_8, buffer));
}
@@ -5422,19 +5438,6 @@ TEST(SetMessageHandlerOnUninitializedVM) {
}
-TEST(DebugBreakOnUninitializedVM) {
- v8::Debug::DebugBreak();
-}
-
-
-TEST(SendCommandToUninitializedVM) {
- const char* dummy_command = "{}";
- uint16_t dummy_buffer[80];
- int dummy_length = AsciiToUtf16(dummy_command, dummy_buffer);
- v8::Debug::SendCommand(dummy_buffer, dummy_length);
-}
-
-
// Source for a JavaScript function which returns the data parameter of a
// function called in the context of the debugger. If no data parameter is
// passed it throws an exception.
@@ -5505,7 +5508,7 @@ static void CheckClosure(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(CallFunctionInDebugger) {
// Create and enter a context with the functions CheckFrameCount,
// CheckSourceLine and CheckDataParameter installed.
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->Set(v8::String::New("CheckFrameCount"),
v8::FunctionTemplate::New(CheckFrameCount));
@@ -5515,7 +5518,7 @@ TEST(CallFunctionInDebugger) {
v8::FunctionTemplate::New(CheckDataParameter));
global_template->Set(v8::String::New("CheckClosure"),
v8::FunctionTemplate::New(CheckClosure));
- v8::Handle<v8::Context> context = v8::Context::New(v8::Isolate::GetCurrent(),
+ v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate(),
NULL,
global_template);
v8::Context::Scope context_scope(context);
@@ -5664,7 +5667,8 @@ static void SendContinueCommand() {
"\"type\":\"request\","
"\"command\":\"continue\"}";
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_continue, buffer));
+ v8::Debug::SendCommand(
+ CcTest::isolate(), buffer, AsciiToUtf16(command_continue, buffer));
}
@@ -5791,7 +5795,7 @@ void HostDispatchV8Thread::Run() {
"\n";
const char* source_2 = "cat(17);\n";
- v8::V8::Initialize();
+ v8::Isolate::Scope isolate_scope(CcTest::isolate());
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -5818,10 +5822,11 @@ void HostDispatchDebuggerThread::Run() {
"\"type\":\"request\","
"\"command\":\"continue\"}";
+ v8::Isolate* isolate = CcTest::isolate();
// v8 thread initializes, runs source_1
host_dispatch_barriers->barrier_1.Wait();
// 1: Set breakpoint in cat().
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_1, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_1, buffer));
host_dispatch_barriers->barrier_2.Wait();
// v8 thread starts compiling source_2.
@@ -5829,7 +5834,7 @@ void HostDispatchDebuggerThread::Run() {
// Wait for host dispatch to be processed.
host_dispatch_barriers->semaphore_1.Wait();
// 2: Continue evaluation
- v8::Debug::SendCommand(buffer, AsciiToUtf16(command_2, buffer));
+ v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_2, buffer));
}
@@ -5878,7 +5883,7 @@ static void DebugMessageHandler() {
void DebugMessageDispatchV8Thread::Run() {
- v8::V8::Initialize();
+ v8::Isolate::Scope isolate_scope(CcTest::isolate());
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -5920,7 +5925,7 @@ TEST(DebuggerDebugMessageDispatch) {
TEST(DebuggerAgent) {
v8::V8::Initialize();
- i::Debugger* debugger = i::Isolate::Current()->debugger();
+ i::Debugger* debugger = CcTest::i_isolate()->debugger();
// Make sure these ports are not used by other tests to allow tests to run in
// parallel.
const int kPort1 = 5858 + FlagDependentPortOffset();
@@ -6235,7 +6240,7 @@ static void ContextCheckMessageHandler(const v8::Debug::Message& message) {
// Checks that this data is set correctly and that when the debug message
// handler is called the expected context is the one active.
TEST(ContextData) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
// Create two contexts.
@@ -6298,7 +6303,7 @@ static void DebugBreakMessageHandler(const v8::Debug::Message& message) {
if (message.IsEvent() && message.GetEvent() == v8::Break) {
message_handler_break_hit_count++;
if (message_handler_break_hit_count == 1) {
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(message.GetIsolate());
}
}
@@ -6364,7 +6369,7 @@ static void DebugEventDebugBreak(
// Keep forcing breaks.
if (break_point_hit_count < 20) {
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(CcTest::isolate());
}
}
}
@@ -6393,7 +6398,7 @@ TEST(RegExpDebugBreak) {
CHECK_EQ(12, result->Int32Value());
v8::Debug::SetDebugEventListener2(DebugEventDebugBreak);
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(env->GetIsolate());
result = f->Call(env->Global(), argc, argv);
// Check that there was only one break event. Matching RegExp should not
@@ -6412,7 +6417,7 @@ static void ExecuteScriptForContextCheck(
v8::Handle<v8::ObjectTemplate> global_template =
v8::Handle<v8::ObjectTemplate>();
context_1 =
- v8::Context::New(v8::Isolate::GetCurrent(), NULL, global_template);
+ v8::Context::New(CcTest::isolate(), NULL, global_template);
v8::Debug::SetMessageHandler2(message_handler);
@@ -6445,7 +6450,7 @@ static void ExecuteScriptForContextCheck(
// break event in an eval statement the expected context is the one returned by
// Message.GetEventContext.
TEST(EvalContextData) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
ExecuteScriptForContextCheck(ContextCheckMessageHandler);
@@ -6471,6 +6476,7 @@ static void DebugEvalContextCheckMessageHandler(
v8::String::Value json(message.GetJSON());
Utf16ToAscii(*json, json.length(), print_buffer);
+ v8::Isolate* isolate = message.GetIsolate();
if (IsBreakEventMessage(print_buffer)) {
break_count++;
if (!sent_eval) {
@@ -6486,7 +6492,8 @@ static void DebugEvalContextCheckMessageHandler(
"\"global\":true,\"disable_break\":false}}";
// Send evaluate command.
- v8::Debug::SendCommand(buffer, AsciiToUtf16(eval_command, buffer));
+ v8::Debug::SendCommand(
+ isolate, buffer, AsciiToUtf16(eval_command, buffer));
return;
} else {
// It's a break event caused by the evaluation request above.
@@ -6506,7 +6513,7 @@ static void DebugEvalContextCheckMessageHandler(
// Tests that context returned for break event is correct when the event occurs
// in 'evaluate' debugger request.
TEST(NestedBreakEventContextData) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
break_count = 0;
message_handler_hit_count = 0;
@@ -6535,7 +6542,7 @@ static void DebugEventScriptCollectedEvent(
// Test that scripts collected are reported through the debug event listener.
TEST(ScriptCollectedEvent) {
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
break_point_hit_count = 0;
script_collected_count = 0;
DebugLocalContext env;
@@ -6546,7 +6553,7 @@ TEST(ScriptCollectedEvent) {
// Do garbage collection to ensure that only the script in this test will be
// collected afterwards.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
script_collected_count = 0;
v8::Debug::SetDebugEventListener2(DebugEventScriptCollectedEvent);
@@ -6557,7 +6564,7 @@ TEST(ScriptCollectedEvent) {
// Do garbage collection to collect the script above which is no longer
// referenced.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(2, script_collected_count);
@@ -6582,7 +6589,7 @@ static void ScriptCollectedMessageHandler(const v8::Debug::Message& message) {
// ScriptCollected events.
TEST(ScriptCollectedEventContext) {
i::FLAG_stress_compaction = false;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::internal::Debug* debug =
reinterpret_cast<v8::internal::Isolate*>(isolate)->debug();
script_collected_message_count = 0;
@@ -6608,7 +6615,7 @@ TEST(ScriptCollectedEventContext) {
// Do garbage collection to ensure that only the script in this test will be
// collected afterwards.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
v8::Script::Compile(v8::String::New("eval('a=1')"))->Run();
@@ -6625,7 +6632,7 @@ TEST(ScriptCollectedEventContext) {
// Do garbage collection to collect the script above which is no longer
// referenced.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(2, script_collected_message_count);
@@ -6660,7 +6667,7 @@ TEST(AfterCompileMessageWhenMessageHandlerIsReset) {
v8::Debug::SetMessageHandler2(NULL);
v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(env->GetIsolate());
v8::Script::Compile(v8::String::New(script))->Run();
// Setting listener to NULL should cause debugger unload.
@@ -6684,7 +6691,7 @@ TEST(BreakMessageWhenMessageHandlerIsReset) {
v8::Debug::SetMessageHandler2(NULL);
v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(env->GetIsolate());
v8::Local<v8::Function> f =
v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
f->Call(env->Global(), 0, NULL);
@@ -6773,7 +6780,7 @@ TEST(ProvisionalBreakpointOnLineOutOfRange) {
static void BreakMessageHandler(const v8::Debug::Message& message) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
if (message.IsEvent() && message.GetEvent() == v8::Break) {
// Count the number of breaks.
break_point_hit_count++;
@@ -6812,7 +6819,7 @@ TEST(NoDebugBreakInAfterCompileMessageHandler) {
v8::Debug::SetMessageHandler2(BreakMessageHandler);
// Set the debug break flag.
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(env->GetIsolate());
// Create a function for testing stepping.
const char* src = "function f() { eval('var x = 10;'); } ";
@@ -6822,7 +6829,7 @@ TEST(NoDebugBreakInAfterCompileMessageHandler) {
CHECK_EQ(1, break_point_hit_count);
// Set the debug break flag again.
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(env->GetIsolate());
f->Call(env->Global(), 0, NULL);
// There should be one more break event when the script is evaluated in 'f'.
CHECK_EQ(2, break_point_hit_count);
@@ -6843,7 +6850,8 @@ static void CountingMessageHandler(const v8::Debug::Message& message) {
// Test that debug messages get processed when ProcessDebugMessages is called.
TEST(ProcessDebugMessages) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
counting_message_handler_counter = 0;
@@ -6857,7 +6865,8 @@ TEST(ProcessDebugMessages) {
"\"command\":\"scripts\"}";
// Send scripts command.
- v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+ v8::Debug::SendCommand(
+ isolate, buffer, AsciiToUtf16(scripts_command, buffer));
CHECK_EQ(0, counting_message_handler_counter);
v8::Debug::ProcessDebugMessages();
@@ -6866,8 +6875,10 @@ TEST(ProcessDebugMessages) {
counting_message_handler_counter = 0;
- v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
- v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+ v8::Debug::SendCommand(
+ isolate, buffer, AsciiToUtf16(scripts_command, buffer));
+ v8::Debug::SendCommand(
+ isolate, buffer, AsciiToUtf16(scripts_command, buffer));
CHECK_EQ(0, counting_message_handler_counter);
v8::Debug::ProcessDebugMessages();
// At least two messages should come
@@ -6899,7 +6910,8 @@ int BacktraceData::frame_counter;
// Test that backtrace requests are answered both from ProcessDebugMessages
// and from a running script.
TEST(Backtrace) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::Debug::SetMessageHandler2(BacktraceData::MessageHandler);
@@ -6912,7 +6924,11 @@ TEST(Backtrace) {
// Check backtrace from ProcessDebugMessages.
BacktraceData::frame_counter = -10;
- v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+ v8::Debug::SendCommand(
+ isolate,
+ buffer,
+ AsciiToUtf16(scripts_command, buffer),
+ NULL);
v8::Debug::ProcessDebugMessages();
CHECK_EQ(BacktraceData::frame_counter, 0);
@@ -6921,7 +6937,11 @@ TEST(Backtrace) {
// Check backtrace from "void(0)" script.
BacktraceData::frame_counter = -10;
- v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+ v8::Debug::SendCommand(
+ isolate,
+ buffer,
+ AsciiToUtf16(scripts_command, buffer),
+ NULL);
script->Run();
CHECK_EQ(BacktraceData::frame_counter, 1);
@@ -6965,7 +6985,7 @@ TEST(DebugBreakFunctionApply) {
v8::Debug::SetDebugEventListener2(DebugEventBreakMax);
// Set the debug break flag before calling the code using function.apply.
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(env->GetIsolate());
// Limit the number of debug breaks. This is a regression test for issue 493
// where this test would enter an infinite loop.
@@ -6991,10 +7011,10 @@ static void NamedGetterWithCallingContextCheck(
v8::Local<v8::String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
CHECK_EQ(0, strcmp(*v8::String::Utf8Value(name), "a"));
- v8::Handle<v8::Context> current = v8::Context::GetCurrent();
+ v8::Handle<v8::Context> current = info.GetIsolate()->GetCurrentContext();
CHECK(current == debugee_context);
CHECK(current != debugger_context);
- v8::Handle<v8::Context> calling = v8::Context::GetCalling();
+ v8::Handle<v8::Context> calling = info.GetIsolate()->GetCallingContext();
CHECK(calling == debugee_context);
CHECK(calling != debugger_context);
info.GetReturnValue().Set(1);
@@ -7010,7 +7030,7 @@ static void DebugEventGetAtgumentPropertyValue(
v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
if (event == v8::Break) {
break_point_hit_count++;
- CHECK(debugger_context == v8::Context::GetCurrent());
+ CHECK(debugger_context == CcTest::isolate()->GetCurrentContext());
v8::Handle<v8::Function> func = v8::Handle<v8::Function>::Cast(CompileRun(
"(function(exec_state) {\n"
" return (exec_state.frame(0).argumentValue(0).property('a').\n"
@@ -7025,7 +7045,7 @@ static void DebugEventGetAtgumentPropertyValue(
TEST(CallingContextIsNotDebugContext) {
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// Create and enter a debugee context.
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -7065,7 +7085,7 @@ TEST(CallingContextIsNotDebugContext) {
TEST(DebugContextIsPreservedBetweenAccesses) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context1 = v8::Debug::GetDebugContext();
v8::Local<v8::Context> context2 = v8::Debug::GetDebugContext();
CHECK_EQ(*context1, *context2);
@@ -7081,7 +7101,7 @@ static void DebugEventContextChecker(const v8::Debug::EventDetails& details) {
// Check that event details contain the context where the debug event occurred.
TEST(DebugEventContext) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
expected_callback_data = v8::Int32::New(2010);
expected_context = v8::Context::New(isolate);
@@ -7112,7 +7132,8 @@ static void DebugEventBreakDataChecker(const v8::Debug::EventDetails& details) {
// Check that event details carry the break data passed to DebugBreakForCommand.
TEST(DebugEventBreakData) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::Debug::SetDebugEventListener2(DebugEventBreakDataChecker);
TestClientData::constructor_call_counter = 0;
@@ -7121,7 +7142,7 @@ TEST(DebugEventBreakData) {
expected_break_data = NULL;
was_debug_event_called = false;
was_debug_break_called = false;
- v8::Debug::DebugBreakForCommand();
+ v8::Debug::DebugBreakForCommand(NULL, isolate);
v8::Script::Compile(v8::String::New("(function(x){return x;})(1);"))->Run();
CHECK(was_debug_event_called);
CHECK(!was_debug_break_called);
@@ -7130,7 +7151,7 @@ TEST(DebugEventBreakData) {
expected_break_data = data1;
was_debug_event_called = false;
was_debug_break_called = false;
- v8::Debug::DebugBreakForCommand(data1);
+ v8::Debug::DebugBreakForCommand(data1, isolate);
v8::Script::Compile(v8::String::New("(function(x){return x+1;})(1);"))->Run();
CHECK(was_debug_event_called);
CHECK(!was_debug_break_called);
@@ -7138,7 +7159,7 @@ TEST(DebugEventBreakData) {
expected_break_data = NULL;
was_debug_event_called = false;
was_debug_break_called = false;
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(isolate);
v8::Script::Compile(v8::String::New("(function(x){return x+2;})(1);"))->Run();
CHECK(!was_debug_event_called);
CHECK(was_debug_break_called);
@@ -7147,8 +7168,8 @@ TEST(DebugEventBreakData) {
expected_break_data = data2;
was_debug_event_called = false;
was_debug_break_called = false;
- v8::Debug::DebugBreak();
- v8::Debug::DebugBreakForCommand(data2);
+ v8::Debug::DebugBreak(isolate);
+ v8::Debug::DebugBreakForCommand(data2, isolate);
v8::Script::Compile(v8::String::New("(function(x){return x+3;})(1);"))->Run();
CHECK(was_debug_event_called);
CHECK(was_debug_break_called);
@@ -7180,13 +7201,13 @@ static void DebugEventBreakDeoptimize(
v8::Handle<v8::String> function_name(result->ToString());
function_name->WriteUtf8(fn);
if (strcmp(fn, "bar") == 0) {
- i::Deoptimizer::DeoptimizeAll(v8::internal::Isolate::Current());
+ i::Deoptimizer::DeoptimizeAll(CcTest::i_isolate());
debug_event_break_deoptimize_done = true;
}
}
}
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(CcTest::isolate());
}
}
@@ -7215,7 +7236,7 @@ TEST(DeoptimizeDuringDebugBreak) {
v8::Script::Compile(v8::String::New("function bar(){}; bar()"))->Run();
// Set debug break and call bar again.
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(env->GetIsolate());
v8::Script::Compile(v8::String::New("bar()"))->Run();
CHECK(debug_event_break_deoptimize_done);
@@ -7273,7 +7294,7 @@ static void DebugEventBreakWithOptimizedStack(
static void ScheduleBreak(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Debug::SetDebugEventListener2(DebugEventBreakWithOptimizedStack);
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(args.GetIsolate());
}
@@ -7338,7 +7359,7 @@ static void TestDebugBreakInLoop(const char* loop_head,
CompileRun(buffer.start());
// Set the debug break to enter the debugger as soon as possible.
- v8::Debug::DebugBreak();
+ v8::Debug::DebugBreak(CcTest::isolate());
// Call function with infinite loop.
CompileRun("f();");
@@ -7408,7 +7429,7 @@ static void DebugBreakInlineListener(
i::Handle<i::Script> source_script = i::Handle<i::Script>(i::Script::cast(
i::JSFunction::cast(*compiled_script)->shared()->script()));
- int break_id = v8::internal::Isolate::Current()->debug()->break_id();
+ int break_id = CcTest::i_isolate()->debug()->break_id();
char script[128];
i::Vector<char> script_vector(script, sizeof(script));
OS::SNPrintF(script_vector, "%%GetFrameCount(%d)", break_id);
@@ -7426,7 +7447,7 @@ static void DebugBreakInlineListener(
i::GetScriptLineNumber(source_script, result->Int32Value()));
}
v8::Debug::SetDebugEventListener2(NULL);
- v8::V8::TerminateExecution();
+ v8::V8::TerminateExecution(CcTest::isolate());
}
@@ -7526,7 +7547,7 @@ TEST(LiveEditEnabled) {
v8::internal::FLAG_allow_natives_syntax = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetLiveEditEnabled(true);
+ v8::Debug::SetLiveEditEnabled(true, env->GetIsolate());
CompileRun("%LiveEditCompareStrings('', '')");
}
@@ -7535,7 +7556,7 @@ TEST(LiveEditDisabled) {
v8::internal::FLAG_allow_natives_syntax = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetLiveEditEnabled(false);
+  v8::Debug::SetLiveEditEnabled(false, env->GetIsolate());
CompileRun("%LiveEditCompareStrings('', '')");
}
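
Taken together, the test-debug.cc hunks apply one migration: every process-global debug entry point now takes the target isolate explicitly. A minimal sketch of the resulting calling convention, assuming the cctest helpers used above (CcTest::isolate(), AsciiToUtf16); SendContinueTo is a hypothetical helper name:

    static void SendContinueTo(v8::Isolate* isolate) {
      // Widen the ASCII protocol command into the uint16_t buffer
      // SendCommand expects, exactly as the tests above do.
      const char* command =
          "{\"seq\":1,\"type\":\"request\",\"command\":\"continue\"}";
      uint16_t buffer[128];
      int length = AsciiToUtf16(command, buffer);
      v8::Debug::DebugBreak(isolate);                   // flag a break on this isolate
      v8::Debug::SendCommand(isolate, buffer, length);  // queue the command for it
      v8::Debug::ProcessDebugMessages();                // drain the queued messages
    }
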
diff --git a/deps/v8/test/cctest/test-declarative-accessors.cc b/deps/v8/test/cctest/test-declarative-accessors.cc
index fa5a0452fd..fb22ccdbab 100644
--- a/deps/v8/test/cctest/test-declarative-accessors.cc
+++ b/deps/v8/test/cctest/test-declarative-accessors.cc
@@ -78,7 +78,7 @@ class DescriptorTestHelper {
DescriptorTestHelper() :
isolate_(NULL), array_(new AlignedArray), handle_array_(new HandleArray) {
v8::V8::Initialize();
- isolate_ = v8::Isolate::GetCurrent();
+ isolate_ = CcTest::isolate();
}
v8::Isolate* isolate_;
// Data objects.
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 18f142061b..de27286dac 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -52,7 +52,7 @@ class DeclarationContext {
virtual ~DeclarationContext() {
if (is_initialized_) {
- Isolate* isolate = Isolate::GetCurrent();
+ Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
Local<Context> context = Local<Context>::New(isolate, context_);
context->Exit();
@@ -116,7 +116,7 @@ DeclarationContext::DeclarationContext()
void DeclarationContext::InitializeIfNeeded() {
if (is_initialized_) return;
- Isolate* isolate = Isolate::GetCurrent();
+ Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
Local<FunctionTemplate> function = FunctionTemplate::New();
Local<Value> data = External::New(this);
@@ -143,8 +143,8 @@ void DeclarationContext::Check(const char* source,
InitializeIfNeeded();
// A retry after a GC may pollute the counts, so perform gc now
// to avoid that.
- HEAP->CollectGarbage(v8::internal::NEW_SPACE);
- HandleScope scope(Isolate::GetCurrent());
+ CcTest::heap()->CollectGarbage(v8::internal::NEW_SPACE);
+ HandleScope scope(CcTest::isolate());
TryCatch catcher;
catcher.SetVerbose(true);
Local<Script> script = Script::Compile(String::New(source));
@@ -169,7 +169,8 @@ void DeclarationContext::Check(const char* source,
CHECK_EQ(value, catcher.Exception());
}
}
- HEAP->CollectAllAvailableGarbage(); // Clean slate for the next test.
+ // Clean slate for the next test.
+ CcTest::heap()->CollectAllAvailableGarbage();
}
@@ -226,14 +227,15 @@ v8::Handle<Integer> DeclarationContext::Query(Local<String> key) {
// Test global declaration of a property the interceptor doesn't know
// about and doesn't handle.
TEST(Unknown) {
- HandleScope scope(Isolate::GetCurrent());
+ HandleScope scope(CcTest::isolate());
+ v8::V8::Initialize();
{ DeclarationContext context;
context.Check("var x; x",
1, // access
1, // declaration
2, // declaration + initialization
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
}
{ DeclarationContext context;
@@ -257,15 +259,16 @@ TEST(Unknown) {
1, // access
2, // declaration + initialization
1, // declaration
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
}
{ DeclarationContext context;
+ // SB 0 - BUG 1213579
context.Check("const x = 0; x",
1, // access
2, // declaration + initialization
1, // declaration
- EXPECT_RESULT, Undefined()); // SB 0 - BUG 1213579
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
}
}
@@ -281,7 +284,7 @@ class PresentPropertyContext: public DeclarationContext {
TEST(Present) {
- HandleScope scope(Isolate::GetCurrent());
+ HandleScope scope(CcTest::isolate());
{ PresentPropertyContext context;
context.Check("var x; x",
@@ -312,7 +315,7 @@ TEST(Present) {
1, // access
1, // initialization
1, // (re-)declaration
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
}
{ PresentPropertyContext context;
@@ -335,14 +338,16 @@ class AbsentPropertyContext: public DeclarationContext {
TEST(Absent) {
- HandleScope scope(Isolate::GetCurrent());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::V8::Initialize();
+ HandleScope scope(isolate);
{ AbsentPropertyContext context;
context.Check("var x; x",
1, // access
1, // declaration
2, // declaration + initialization
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(isolate));
}
{ AbsentPropertyContext context;
@@ -366,7 +371,7 @@ TEST(Absent) {
1, // access
2, // declaration + initialization
1, // declaration
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(isolate));
}
{ AbsentPropertyContext context;
@@ -374,7 +379,7 @@ TEST(Absent) {
1, // access
2, // declaration + initialization
1, // declaration
- EXPECT_RESULT, Undefined()); // SB 0 - BUG 1213579
+ EXPECT_RESULT, Undefined(isolate)); // SB 0 - BUG 1213579
}
{ AbsentPropertyContext context;
@@ -382,7 +387,7 @@ TEST(Absent) {
1, // access
1, // declaration
1, // declaration + initialization
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(isolate));
}
}
@@ -425,14 +430,15 @@ class AppearingPropertyContext: public DeclarationContext {
TEST(Appearing) {
- HandleScope scope(Isolate::GetCurrent());
+ v8::V8::Initialize();
+ HandleScope scope(CcTest::isolate());
{ AppearingPropertyContext context;
context.Check("var x; x",
1, // access
1, // declaration
2, // declaration + initialization
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
}
{ AppearingPropertyContext context;
@@ -456,7 +462,7 @@ TEST(Appearing) {
1, // access
2, // declaration + initialization
1, // declaration
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
}
{ AppearingPropertyContext context;
@@ -464,7 +470,7 @@ TEST(Appearing) {
1, // access
2, // declaration + initialization
1, // declaration
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
// Result is undefined because declaration succeeded but
// initialization to 0 failed (due to context behavior).
}
@@ -517,14 +523,15 @@ class ReappearingPropertyContext: public DeclarationContext {
TEST(Reappearing) {
- HandleScope scope(Isolate::GetCurrent());
+ v8::V8::Initialize();
+ HandleScope scope(CcTest::isolate());
{ ReappearingPropertyContext context;
context.Check("const x; var x = 0",
0,
3, // const declaration+initialization, var initialization
3, // 2 x declaration + var initialization
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
}
}
@@ -546,7 +553,7 @@ class ExistsInPrototypeContext: public DeclarationContext {
TEST(ExistsInPrototype) {
i::FLAG_es52_globals = true;
- HandleScope scope(Isolate::GetCurrent());
+ HandleScope scope(CcTest::isolate());
// Sanity check to make sure that the holder of the interceptor
// really is the prototype object.
@@ -563,7 +570,7 @@ TEST(ExistsInPrototype) {
0,
0,
0,
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
}
{ ExistsInPrototypeContext context;
@@ -579,7 +586,7 @@ TEST(ExistsInPrototype) {
0,
0,
0,
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
}
{ ExistsInPrototypeContext context;
@@ -609,14 +616,15 @@ class AbsentInPrototypeContext: public DeclarationContext {
TEST(AbsentInPrototype) {
i::FLAG_es52_globals = true;
- HandleScope scope(Isolate::GetCurrent());
+ v8::V8::Initialize();
+ HandleScope scope(CcTest::isolate());
{ AbsentInPrototypeContext context;
context.Check("if (false) { var x = 0; }; x",
0,
0,
0,
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
}
}
@@ -656,7 +664,7 @@ class ExistsInHiddenPrototypeContext: public DeclarationContext {
TEST(ExistsInHiddenPrototype) {
i::FLAG_es52_globals = true;
- HandleScope scope(Isolate::GetCurrent());
+ HandleScope scope(CcTest::isolate());
{ ExistsInHiddenPrototypeContext context;
context.Check("var x; x",
@@ -688,7 +696,7 @@ TEST(ExistsInHiddenPrototype) {
0,
0,
1, // (re-)declaration
- EXPECT_RESULT, Undefined());
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
}
// TODO(mstarzinger): The semantics of global const is vague.
@@ -706,8 +714,8 @@ TEST(ExistsInHiddenPrototype) {
class SimpleContext {
public:
SimpleContext()
- : handle_scope_(Isolate::GetCurrent()),
- context_(Context::New(Isolate::GetCurrent())) {
+ : handle_scope_(CcTest::isolate()),
+ context_(Context::New(CcTest::isolate())) {
context_->Enter();
}
@@ -749,7 +757,7 @@ class SimpleContext {
TEST(CrossScriptReferences) {
- HandleScope scope(Isolate::GetCurrent());
+ HandleScope scope(CcTest::isolate());
{ SimpleContext context;
context.Check("var x = 1; x",
@@ -794,7 +802,7 @@ TEST(CrossScriptReferencesHarmony) {
i::FLAG_harmony_scoping = true;
i::FLAG_harmony_modules = true;
- HandleScope scope(Isolate::GetCurrent());
+ HandleScope scope(CcTest::isolate());
const char* decs[] = {
"var x = 1; x", "x", "this.x",
@@ -822,7 +830,7 @@ TEST(CrossScriptConflicts) {
i::FLAG_harmony_scoping = true;
i::FLAG_harmony_modules = true;
- HandleScope scope(Isolate::GetCurrent());
+ HandleScope scope(CcTest::isolate());
const char* firsts[] = {
"var x = 1; x",
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index 83a6354b2f..765b1ce55f 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -104,7 +104,7 @@ class AllowNativesSyntaxNoInliningNoConcurrent {
// Abort any ongoing incremental marking to make sure that all weak global
// handle callbacks are processed.
static void NonIncrementalGC() {
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
}
@@ -134,7 +134,7 @@ TEST(DeoptimizeSimple) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
// Test lazy deoptimization of a simple function. Call the function after the
// deoptimization while it is still activated further down the stack.
@@ -150,7 +150,7 @@ TEST(DeoptimizeSimple) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -172,7 +172,7 @@ TEST(DeoptimizeSimpleWithArguments) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
// Test lazy deoptimization of a simple function with some arguments. Call the
// function after the deoptimization while it is still activated further down
@@ -189,7 +189,7 @@ TEST(DeoptimizeSimpleWithArguments) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -213,7 +213,7 @@ TEST(DeoptimizeSimpleNested) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(6, env->Global()->Get(v8_str("result"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
}
@@ -237,7 +237,7 @@ TEST(DeoptimizeRecursive) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(11, env->Global()->Get(v8_str("calls"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
v8::Local<v8::Function> fun =
v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
@@ -269,7 +269,7 @@ TEST(DeoptimizeMultiple) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -290,7 +290,7 @@ TEST(DeoptimizeConstructor) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(env->Global()->Get(v8_str("result"))->IsTrue());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
{
AlwaysOptimizeAllowNativesSyntaxNoInlining options;
@@ -307,7 +307,7 @@ TEST(DeoptimizeConstructor) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(3, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -336,7 +336,7 @@ TEST(DeoptimizeConstructorMultiple) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -367,7 +367,7 @@ TEST(DeoptimizeBinaryOperationADDString) {
i::FLAG_always_opt = true;
CompileRun(f_source);
CompileRun("f('a+', new X());");
- CHECK(!i::Isolate::Current()->use_crankshaft() ||
+ CHECK(!CcTest::i_isolate()->use_crankshaft() ||
GetJSFunction(env->Global(), "f")->IsOptimized());
// Call f and force deoptimization while processing the binary operation.
@@ -382,7 +382,7 @@ TEST(DeoptimizeBinaryOperationADDString) {
CHECK(result->IsString());
v8::String::Utf8Value utf8(result);
CHECK_EQ("a+an X", *utf8);
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -419,7 +419,7 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
i::FLAG_always_opt = true;
CompileRun(f_source);
CompileRun("f(7, new X());");
- CHECK(!i::Isolate::Current()->use_crankshaft() ||
+ CHECK(!CcTest::i_isolate()->use_crankshaft() ||
GetJSFunction((*env)->Global(), "f")->IsOptimized());
// Call f and force deoptimization while processing the binary operation.
@@ -438,7 +438,7 @@ TEST(DeoptimizeBinaryOperationADD) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(15, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -450,7 +450,7 @@ TEST(DeoptimizeBinaryOperationSUB) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(-1, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -462,7 +462,7 @@ TEST(DeoptimizeBinaryOperationMUL) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(56, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -474,7 +474,7 @@ TEST(DeoptimizeBinaryOperationDIV) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(0, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -486,7 +486,7 @@ TEST(DeoptimizeBinaryOperationMOD) {
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(7, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -517,7 +517,7 @@ TEST(DeoptimizeCompare) {
i::FLAG_always_opt = true;
CompileRun(f_source);
CompileRun("f('a', new X());");
- CHECK(!i::Isolate::Current()->use_crankshaft() ||
+ CHECK(!CcTest::i_isolate()->use_crankshaft() ||
GetJSFunction(env->Global(), "f")->IsOptimized());
// Call f and force deoptimization while processing the comparison.
@@ -529,7 +529,7 @@ TEST(DeoptimizeCompare) {
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(true, env->Global()->Get(v8_str("result"))->BooleanValue());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -587,7 +587,7 @@ TEST(DeoptimizeLoadICStoreIC) {
CompileRun("g1(new X());");
CompileRun("f2(new X(), 'z');");
CompileRun("g2(new X(), 'z');");
- if (i::Isolate::Current()->use_crankshaft()) {
+ if (CcTest::i_isolate()->use_crankshaft()) {
CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
@@ -609,7 +609,7 @@ TEST(DeoptimizeLoadICStoreIC) {
CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -671,7 +671,7 @@ TEST(DeoptimizeLoadICStoreICNested) {
CompileRun("g1(new X());");
CompileRun("f2(new X(), 'z');");
CompileRun("g2(new X(), 'z');");
- if (i::Isolate::Current()->use_crankshaft()) {
+ if (CcTest::i_isolate()->use_crankshaft()) {
CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
@@ -690,5 +690,5 @@ TEST(DeoptimizeLoadICStoreICNested) {
CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(Isolate::Current()));
+ CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
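
Every deoptimization test above ends with the same post-condition, now phrased against the explicit test isolate rather than Isolate::Current(). Collected into a helper for clarity (a sketch; the name is invented here, and it assumes the file's using-directive for v8::internal):

    static void CheckNoDeoptimizedCodeLeft() {
      // After the forced deopt and GC, no deoptimized code objects
      // should remain alive.
      CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
    }
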
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index b9e8b1ec06..44f64f7881 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -41,7 +41,7 @@ using namespace v8::internal;
TEST(ObjectHashTable) {
LocalContext context;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(context->GetIsolate());
Handle<ObjectHashTable> table = factory->NewObjectHashTable(23);
@@ -50,13 +50,13 @@ TEST(ObjectHashTable) {
table = PutIntoObjectHashTable(table, a, b);
CHECK_EQ(table->NumberOfElements(), 1);
CHECK_EQ(table->Lookup(*a), *b);
- CHECK_EQ(table->Lookup(*b), HEAP->the_hole_value());
+ CHECK_EQ(table->Lookup(*b), CcTest::heap()->the_hole_value());
// Keys still have to be valid after objects were moved.
- HEAP->CollectGarbage(NEW_SPACE);
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
CHECK_EQ(table->NumberOfElements(), 1);
CHECK_EQ(table->Lookup(*a), *b);
- CHECK_EQ(table->Lookup(*b), HEAP->the_hole_value());
+ CHECK_EQ(table->Lookup(*b), CcTest::heap()->the_hole_value());
// Keys that are overwritten should not change number of elements.
table = PutIntoObjectHashTable(table, a, factory->NewJSArray(13));
@@ -67,7 +67,7 @@ TEST(ObjectHashTable) {
table = PutIntoObjectHashTable(table, a, factory->the_hole_value());
CHECK_EQ(table->NumberOfElements(), 0);
CHECK_EQ(table->NumberOfDeletedElements(), 1);
- CHECK_EQ(table->Lookup(*a), HEAP->the_hole_value());
+ CHECK_EQ(table->Lookup(*a), CcTest::heap()->the_hole_value());
// Keys should map back to their respective values and also should get
// an identity hash code generated.
@@ -87,7 +87,7 @@ TEST(ObjectHashTable) {
Handle<JSReceiver> key = factory->NewJSArray(7);
CHECK(key->GetIdentityHash(ALLOW_CREATION)->ToObjectChecked()->IsSmi());
CHECK_EQ(table->FindEntry(*key), ObjectHashTable::kNotFound);
- CHECK_EQ(table->Lookup(*key), HEAP->the_hole_value());
+ CHECK_EQ(table->Lookup(*key), CcTest::heap()->the_hole_value());
CHECK(key->GetIdentityHash(OMIT_CREATION)->ToObjectChecked()->IsSmi());
}
@@ -95,8 +95,9 @@ TEST(ObjectHashTable) {
// should not get an identity hash code generated.
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
- CHECK_EQ(table->Lookup(*key), HEAP->the_hole_value());
- CHECK_EQ(key->GetIdentityHash(OMIT_CREATION), HEAP->undefined_value());
+ CHECK_EQ(table->Lookup(*key), CcTest::heap()->the_hole_value());
+ CHECK_EQ(key->GetIdentityHash(OMIT_CREATION),
+ CcTest::heap()->undefined_value());
}
}
@@ -120,7 +121,7 @@ class ObjectHashTableTest: public ObjectHashTable {
TEST(HashTableRehash) {
LocalContext context;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(context->GetIsolate());
// Test almost filled table.
@@ -156,7 +157,7 @@ TEST(HashTableRehash) {
TEST(ObjectHashSetCausesGC) {
i::FLAG_stress_compaction = false;
LocalContext context;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(context->GetIsolate());
Handle<ObjectHashSet> table = factory->NewObjectHashSet(1);
@@ -170,8 +171,8 @@ TEST(ObjectHashSetCausesGC) {
// Simulate a full heap so that generating an identity hash code
// in subsequent calls will request GC.
- SimulateFullSpace(HEAP->new_space());
- SimulateFullSpace(HEAP->old_pointer_space());
+ SimulateFullSpace(CcTest::heap()->new_space());
+ SimulateFullSpace(CcTest::heap()->old_pointer_space());
// Calling Contains() should not cause GC ever.
CHECK(!table->Contains(*key));
@@ -189,7 +190,7 @@ TEST(ObjectHashSetCausesGC) {
TEST(ObjectHashTableCausesGC) {
i::FLAG_stress_compaction = false;
LocalContext context;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(context->GetIsolate());
Handle<ObjectHashTable> table = factory->NewObjectHashTable(1);
@@ -203,8 +204,8 @@ TEST(ObjectHashTableCausesGC) {
// Simulate a full heap so that generating an identity hash code
// in subsequent calls will request GC.
- SimulateFullSpace(HEAP->new_space());
- SimulateFullSpace(HEAP->old_pointer_space());
+ SimulateFullSpace(CcTest::heap()->new_space());
+ SimulateFullSpace(CcTest::heap()->old_pointer_space());
// Calling Lookup() should not cause GC ever.
CHECK(table->Lookup(*key)->IsTheHole());
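
The dictionary tests swap the process-global HEAP macro for the fixture accessor throughout. The recurring GC idioms, gathered into one hedged sketch (the helper name is invented; it assumes the file's using-directive for v8::internal and the cctest SimulateFullSpace utility):

    static void StressHeapForIdentityHash() {
      // A scavenge that moves keys must not invalidate table lookups.
      CcTest::heap()->CollectGarbage(NEW_SPACE);
      // A full GC that aborts incremental marking cleans the slate.
      CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
      // Filling both spaces makes the next identity-hash allocation
      // request a GC, which the Contains()/Lookup() tests forbid.
      SimulateFullSpace(CcTest::heap()->new_space());
      SimulateFullSpace(CcTest::heap()->old_pointer_space());
    }
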
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 2a53d43d41..cb1b1c798b 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -65,7 +65,7 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// in the rest of the macros.
#define SET_UP() \
CcTest::InitializeVM(); \
- Isolate* isolate = Isolate::Current(); \
+ Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
byte *buffer = reinterpret_cast<byte*>(malloc(4*1024)); \
Assembler assm(isolate, buffer, 4*1024); \
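
SET_UP only builds the isolate, scratch buffer, and Assembler; the assertions in these disasm tests then go through a COMPARE macro that assembles one instruction and diffs the disassembler's output against an expected string. An illustrative use (a sketch; the expected-text layout is assumed from the test's conventions):

SET_UP();
// Assemble `mov r0, r1` and check the disassembly, encoding first.
COMPARE(mov(r0, Operand(r1)),
        "e1a00001       mov r0, r1");
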
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 1b6af47233..301545c6c4 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -354,19 +354,29 @@ TEST(DisasmIa320) {
CpuFeatureScope fscope(&assm, SSE2);
__ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000));
__ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
+ __ movaps(xmm0, xmm1);
+ // 128 bit move instructions.
+ __ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ movdqa(Operand(ebx, ecx, times_4, 10000), xmm0);
+ __ movdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ movdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
+
__ addsd(xmm1, xmm0);
__ mulsd(xmm1, xmm0);
__ subsd(xmm1, xmm0);
__ divsd(xmm1, xmm0);
- __ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000));
- __ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1);
__ ucomisd(xmm0, xmm1);
+ __ cmpltsd(xmm0, xmm1);
- // 128 bit move instructions.
- __ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
- __ movdqa(Operand(ebx, ecx, times_4, 10000), xmm0);
- __ movdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
- __ movdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
+ __ andps(xmm0, xmm1);
+ __ andpd(xmm0, xmm1);
+ __ psllq(xmm0, 17);
+ __ psllq(xmm0, xmm1);
+ __ psrlq(xmm0, 17);
+ __ psrlq(xmm0, xmm1);
+ __ por(xmm0, xmm1);
}
}
@@ -393,42 +403,13 @@ TEST(DisasmIa320) {
}
}
- // andpd, cmpltsd, movaps, psllq, psrlq, por.
- {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(&assm, SSE2);
- __ andpd(xmm0, xmm1);
- __ andpd(xmm1, xmm2);
-
- __ cmpltsd(xmm0, xmm1);
- __ cmpltsd(xmm1, xmm2);
-
- __ movaps(xmm0, xmm1);
- __ movaps(xmm1, xmm2);
-
- __ psllq(xmm0, 17);
- __ psllq(xmm1, 42);
-
- __ psllq(xmm0, xmm1);
- __ psllq(xmm1, xmm2);
-
- __ psrlq(xmm0, 17);
- __ psrlq(xmm1, 42);
-
- __ psrlq(xmm0, xmm1);
- __ psrlq(xmm1, xmm2);
-
- __ por(xmm0, xmm1);
- __ por(xmm1, xmm2);
- }
- }
-
{
if (CpuFeatures::IsSupported(SSE2) &&
CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope(&assm, SSE4_1);
__ pextrd(eax, xmm0, 1);
__ pinsrd(xmm1, eax, 0);
+ __ extractps(eax, xmm1, 0);
}
}
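
These hunks also finish the move from the old static CpuFeatures::Scope to the assembler-bound CpuFeatureScope. The guard pattern the file now uses everywhere is:

if (CpuFeatures::IsSupported(SSE4_1)) {
  // RAII guard: marks SSE4.1 encodings as legal for this particular
  // assembler, but only inside the block.
  CpuFeatureScope scope(&assm, SSE4_1);
  __ pextrd(eax, xmm0, 1);
}

Binding the scope to a concrete Assembler, rather than flipping a process-wide flag, is what lets feature checks stay correct per isolate.
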
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index 0e79a580f2..725b3a5674 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -65,7 +65,7 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// in the rest of the macros.
#define SET_UP() \
CcTest::InitializeVM(); \
- Isolate* isolate = Isolate::Current(); \
+ Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
byte *buffer = reinterpret_cast<byte*>(malloc(4*1024)); \
Assembler assm(isolate, buffer, 4*1024); \
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 1ff9fd336b..8fd036956f 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -50,7 +50,7 @@ TEST(DisasmX64) {
CcTest::InitializeVM();
v8::HandleScope scope;
v8::internal::byte buffer[2048];
- Assembler assm(Isolate::Current(), buffer, sizeof buffer);
+ Assembler assm(CcTest::i_isolate(), buffer, sizeof buffer);
DummyStaticFunction(NULL);  // Reference it once so it is not optimized away (debugging aid).
// Short immediate instructions
@@ -239,7 +239,7 @@ TEST(DisasmX64) {
__ bind(&L2);
__ call(Operand(rbx, rcx, times_4, 10000));
__ nop();
- Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Handle<Code> ic(CcTest::i_isolate()->builtins()->builtin(
Builtins::kLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
@@ -335,60 +335,59 @@ TEST(DisasmX64) {
__ fcompp();
__ fwait();
__ nop();
+
+ // SSE instructions
{
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000));
- __ cvttss2si(rdx, xmm1);
- __ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000));
- __ cvttsd2si(rdx, xmm1);
- __ cvttsd2siq(rdx, xmm1);
- __ addsd(xmm1, xmm0);
- __ mulsd(xmm1, xmm0);
- __ subsd(xmm1, xmm0);
- __ divsd(xmm1, xmm0);
- __ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
- __ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
- __ ucomisd(xmm0, xmm1);
-
- // 128 bit move instructions.
- __ movdqa(xmm0, Operand(rbx, rcx, times_4, 10000));
- __ movdqa(Operand(rbx, rcx, times_4, 10000), xmm0);
- }
+ __ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ cvttss2si(rdx, xmm1);
+ __ movaps(xmm0, xmm1);
+
+ __ andps(xmm0, xmm1);
+ }
+ // SSE 2 instructions
+ {
+ __ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ cvttsd2si(rdx, xmm1);
+ __ cvttsd2siq(rdx, xmm1);
+ __ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
+ // 128 bit move instructions.
+ __ movdqa(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ movdqa(Operand(rbx, rcx, times_4, 10000), xmm0);
+
+ __ addsd(xmm1, xmm0);
+ __ mulsd(xmm1, xmm0);
+ __ subsd(xmm1, xmm0);
+ __ divsd(xmm1, xmm0);
+ __ ucomisd(xmm0, xmm1);
+
+ __ andpd(xmm0, xmm1);
}
// cmov.
{
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope use_cmov(CMOV);
- __ cmovq(overflow, rax, Operand(rax, 0));
- __ cmovq(no_overflow, rax, Operand(rax, 1));
- __ cmovq(below, rax, Operand(rax, 2));
- __ cmovq(above_equal, rax, Operand(rax, 3));
- __ cmovq(equal, rax, Operand(rbx, 0));
- __ cmovq(not_equal, rax, Operand(rbx, 1));
- __ cmovq(below_equal, rax, Operand(rbx, 2));
- __ cmovq(above, rax, Operand(rbx, 3));
- __ cmovq(sign, rax, Operand(rcx, 0));
- __ cmovq(not_sign, rax, Operand(rcx, 1));
- __ cmovq(parity_even, rax, Operand(rcx, 2));
- __ cmovq(parity_odd, rax, Operand(rcx, 3));
- __ cmovq(less, rax, Operand(rdx, 0));
- __ cmovq(greater_equal, rax, Operand(rdx, 1));
- __ cmovq(less_equal, rax, Operand(rdx, 2));
- __ cmovq(greater, rax, Operand(rdx, 3));
- }
+ __ cmovq(overflow, rax, Operand(rax, 0));
+ __ cmovq(no_overflow, rax, Operand(rax, 1));
+ __ cmovq(below, rax, Operand(rax, 2));
+ __ cmovq(above_equal, rax, Operand(rax, 3));
+ __ cmovq(equal, rax, Operand(rbx, 0));
+ __ cmovq(not_equal, rax, Operand(rbx, 1));
+ __ cmovq(below_equal, rax, Operand(rbx, 2));
+ __ cmovq(above, rax, Operand(rbx, 3));
+ __ cmovq(sign, rax, Operand(rcx, 0));
+ __ cmovq(not_sign, rax, Operand(rcx, 1));
+ __ cmovq(parity_even, rax, Operand(rcx, 2));
+ __ cmovq(parity_odd, rax, Operand(rcx, 3));
+ __ cmovq(less, rax, Operand(rdx, 0));
+ __ cmovq(greater_equal, rax, Operand(rdx, 1));
+ __ cmovq(less_equal, rax, Operand(rdx, 2));
+ __ cmovq(greater, rax, Operand(rdx, 3));
}
- // andpd, etc.
{
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ andpd(xmm0, xmm1);
- __ andpd(xmm1, xmm2);
-
- __ movaps(xmm0, xmm1);
- __ movaps(xmm1, xmm2);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope(&assm, SSE4_1);
+ __ extractps(rax, xmm1, 0);
}
}
@@ -401,7 +400,7 @@ TEST(DisasmX64) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = CcTest::heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
Handle<Code>())->ToObjectChecked();
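
For context, the epilogue of these disasm tests (unchanged here, so outside the hunk) wraps the assembled buffer in a Code object and dumps it for inspection. Roughly, and stated as an assumption about the surrounding code rather than a quote of it:

CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code* c = Code::cast(code);
byte* begin = c->instruction_start();
byte* end = begin + c->instruction_size();
disasm::Disassembler::Disassemble(stdout, begin, end);  // manual inspection
#endif
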
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index 9cb12c4787..a1d2405ad5 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -54,15 +54,18 @@ TEST(Flags1) {
TEST(Flags2) {
SetFlagsToDefault();
- int argc = 7;
- const char* argv[] = { "Test2", "-notesting-bool-flag", "notaflag",
+ int argc = 8;
+ const char* argv[] = { "Test2", "-notesting-bool-flag",
+ "--notesting-maybe-bool-flag", "notaflag",
"--testing_int_flag=77", "-testing_float_flag=.25",
"--testing_string_flag", "no way!" };
CHECK_EQ(0, FlagList::SetFlagsFromCommandLine(&argc,
const_cast<char **>(argv),
false));
- CHECK_EQ(7, argc);
+ CHECK_EQ(8, argc);
CHECK(!FLAG_testing_bool_flag);
+ CHECK(FLAG_testing_maybe_bool_flag.has_value);
+ CHECK(!FLAG_testing_maybe_bool_flag.value);
CHECK_EQ(77, FLAG_testing_int_flag);
CHECK_EQ(.25, FLAG_testing_float_flag);
CHECK_EQ(0, strcmp(FLAG_testing_string_flag, "no way!"));
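
The new testing_maybe_bool_flag checks come from the tri-state flag type this V8 release introduces: unset, explicitly true, or explicitly false. Its observable shape, inferred from the assertions in these hunks (the real definition lives in src/flags.h):

struct MaybeBoolFlag {
  bool has_value;  // was the flag mentioned on the command line at all?
  bool value;      // its boolean value; meaningful only if has_value
};

So `--testing-maybe-bool-flag` yields {true, true}, the `no` prefix yields {true, false}, and omitting the flag leaves has_value false, which is exactly what Flags4/Flags4b below assert.
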
@@ -73,10 +76,13 @@ TEST(Flags2b) {
SetFlagsToDefault();
const char* str =
" -notesting-bool-flag notaflag --testing_int_flag=77 "
+ "-notesting-maybe-bool-flag "
"-testing_float_flag=.25 "
"--testing_string_flag no_way! ";
CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK(!FLAG_testing_bool_flag);
+ CHECK(FLAG_testing_maybe_bool_flag.has_value);
+ CHECK(!FLAG_testing_maybe_bool_flag.value);
CHECK_EQ(77, FLAG_testing_int_flag);
CHECK_EQ(.25, FLAG_testing_float_flag);
CHECK_EQ(0, strcmp(FLAG_testing_string_flag, "no_way!"));
@@ -85,9 +91,9 @@ TEST(Flags2b) {
TEST(Flags3) {
SetFlagsToDefault();
- int argc = 8;
+ int argc = 9;
const char* argv[] =
- { "Test3", "--testing_bool_flag", "notaflag",
+ { "Test3", "--testing_bool_flag", "--testing-maybe-bool-flag", "notaflag",
"--testing_int_flag", "-666",
"--testing_float_flag", "-12E10", "-testing-string-flag=foo-bar" };
CHECK_EQ(0, FlagList::SetFlagsFromCommandLine(&argc,
@@ -95,6 +101,8 @@ TEST(Flags3) {
true));
CHECK_EQ(2, argc);
CHECK(FLAG_testing_bool_flag);
+ CHECK(FLAG_testing_maybe_bool_flag.has_value);
+ CHECK(FLAG_testing_maybe_bool_flag.value);
CHECK_EQ(-666, FLAG_testing_int_flag);
CHECK_EQ(-12E10, FLAG_testing_float_flag);
CHECK_EQ(0, strcmp(FLAG_testing_string_flag, "foo-bar"));
@@ -104,11 +112,14 @@ TEST(Flags3) {
TEST(Flags3b) {
SetFlagsToDefault();
const char* str =
- "--testing_bool_flag notaflag --testing_int_flag -666 "
+ "--testing_bool_flag --testing-maybe-bool-flag notaflag "
+ "--testing_int_flag -666 "
"--testing_float_flag -12E10 "
"-testing-string-flag=foo-bar";
CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK(FLAG_testing_bool_flag);
+ CHECK(FLAG_testing_maybe_bool_flag.has_value);
+ CHECK(FLAG_testing_maybe_bool_flag.value);
CHECK_EQ(-666, FLAG_testing_int_flag);
CHECK_EQ(-12E10, FLAG_testing_float_flag);
CHECK_EQ(0, strcmp(FLAG_testing_string_flag, "foo-bar"));
@@ -123,6 +134,7 @@ TEST(Flags4) {
const_cast<char **>(argv),
true));
CHECK_EQ(2, argc);
+ CHECK(!FLAG_testing_maybe_bool_flag.has_value);
}
@@ -130,6 +142,7 @@ TEST(Flags4b) {
SetFlagsToDefault();
const char* str = "--testing_bool_flag --foo";
CHECK_EQ(2, FlagList::SetFlagsFromString(str, StrLength(str)));
+ CHECK(!FLAG_testing_maybe_bool_flag.has_value);
}
@@ -181,7 +194,7 @@ TEST(FlagsJSArguments1) {
true));
CHECK_EQ(42, FLAG_testing_int_flag);
CHECK_EQ(2.5, FLAG_testing_float_flag);
- CHECK_EQ(2, FLAG_js_arguments.argc());
+ CHECK_EQ(2, FLAG_js_arguments.argc);
CHECK_EQ(0, strcmp(FLAG_js_arguments[0], "testing-float-flag"));
CHECK_EQ(0, strcmp(FLAG_js_arguments[1], "7"));
CHECK_EQ(1, argc);
@@ -194,7 +207,7 @@ TEST(FlagsJSArguments1b) {
CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK_EQ(42, FLAG_testing_int_flag);
CHECK_EQ(2.5, FLAG_testing_float_flag);
- CHECK_EQ(2, FLAG_js_arguments.argc());
+ CHECK_EQ(2, FLAG_js_arguments.argc);
CHECK_EQ(0, strcmp(FLAG_js_arguments[0], "testing-float-flag"));
CHECK_EQ(0, strcmp(FLAG_js_arguments[1], "7"));
}
@@ -206,7 +219,7 @@ TEST(FlagsJSArguments2) {
CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK_EQ(42, FLAG_testing_int_flag);
CHECK_EQ(2.5, FLAG_testing_float_flag);
- CHECK_EQ(2, FLAG_js_arguments.argc());
+ CHECK_EQ(2, FLAG_js_arguments.argc);
CHECK_EQ(0, strcmp(FLAG_js_arguments[0], "testing-float-flag"));
CHECK_EQ(0, strcmp(FLAG_js_arguments[1], "7"));
}
@@ -218,7 +231,7 @@ TEST(FlagsJSArguments3) {
CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK_EQ(42, FLAG_testing_int_flag);
CHECK_EQ(2.5, FLAG_testing_float_flag);
- CHECK_EQ(2, FLAG_js_arguments.argc());
+ CHECK_EQ(2, FLAG_js_arguments.argc);
CHECK_EQ(0, strcmp(FLAG_js_arguments[0], "testing-float-flag"));
CHECK_EQ(0, strcmp(FLAG_js_arguments[1], "7"));
}
@@ -229,7 +242,7 @@ TEST(FlagsJSArguments4) {
const char* str = "--testing-int-flag 42 --";
CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
CHECK_EQ(42, FLAG_testing_int_flag);
- CHECK_EQ(0, FLAG_js_arguments.argc());
+ CHECK_EQ(0, FLAG_js_arguments.argc);
}
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index a0c4b1e728..1a000afba2 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -51,7 +51,7 @@ using ::v8::internal::String;
static void CheckFunctionName(v8::Handle<v8::Script> script,
const char* func_pos_src,
const char* ref_inferred_name) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
// Get script source.
@@ -82,7 +82,7 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
isolate->debug()->PrepareForBreakPoints();
Object* shared_func_info_ptr =
isolate->debug()->FindSharedFunctionInfoInScript(i_script, func_pos);
- CHECK(shared_func_info_ptr != HEAP->undefined_value());
+ CHECK(shared_func_info_ptr != CcTest::heap()->undefined_value());
Handle<SharedFunctionInfo> shared_func_info(
SharedFunctionInfo::cast(shared_func_info_ptr));
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 1d33a8c86b..d0b80d1c8f 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -86,19 +86,19 @@ class TestObjectVisitor : public ObjectVisitor {
TEST(IterateObjectGroupsOldApi) {
CcTest::InitializeVM();
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
-
+ GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
+ Heap* heap = CcTest::heap();
v8::HandleScope handle_scope(CcTest::isolate());
Handle<Object> g1s1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g1s2 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2s1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2s2 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
TestRetainedObjectInfo info1;
TestRetainedObjectInfo info2;
@@ -181,19 +181,20 @@ TEST(IterateObjectGroupsOldApi) {
TEST(IterateObjectGroups) {
CcTest::InitializeVM();
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
+ Heap* heap = CcTest::heap();
v8::HandleScope handle_scope(CcTest::isolate());
Handle<Object> g1s1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g1s2 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2s1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2s2 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
TestRetainedObjectInfo info1;
TestRetainedObjectInfo info2;
@@ -275,24 +276,25 @@ TEST(IterateObjectGroups) {
TEST(ImplicitReferences) {
CcTest::InitializeVM();
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
+ Heap* heap = CcTest::heap();
v8::HandleScope handle_scope(CcTest::isolate());
Handle<Object> g1s1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g1c1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g1c2 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2s1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2s2 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2c1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
global_handles->SetObjectGroupId(g1s1.location(), UniqueId(1));
global_handles->SetObjectGroupId(g2s1.location(), UniqueId(2));
@@ -319,7 +321,7 @@ TEST(ImplicitReferences) {
TEST(EternalHandles) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
EternalHandles* eternal_handles = isolate->eternal_handles();
diff --git a/deps/v8/test/cctest/test-global-object.cc b/deps/v8/test/cctest/test-global-object.cc
index b124b2728d..5fe77c2adf 100644
--- a/deps/v8/test/cctest/test-global-object.cc
+++ b/deps/v8/test/cctest/test-global-object.cc
@@ -34,7 +34,7 @@ using namespace v8;
// This test fails if properties on the prototype of the global object appear
// as declared globals.
TEST(StrictUndeclaredGlobalVariable) {
- HandleScope scope(Isolate::GetCurrent());
+ HandleScope scope(CcTest::isolate());
v8::Local<v8::String> var_name = v8_str("x");
LocalContext context;
v8::TryCatch try_catch;
diff --git a/deps/v8/test/cctest/test-hashing.cc b/deps/v8/test/cctest/test-hashing.cc
index 65362698ed..3ec844e9c7 100644
--- a/deps/v8/test/cctest/test-hashing.cc
+++ b/deps/v8/test/cctest/test-hashing.cc
@@ -151,7 +151,7 @@ void generate(MacroAssembler* masm, uint32_t key) {
void check(i::Vector<const uint8_t> string) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
@@ -188,12 +188,12 @@ void check(i::Vector<const char> s) {
void check(uint32_t key) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
- MacroAssembler masm(Isolate::Current(), buffer, sizeof buffer);
+ MacroAssembler masm(CcTest::i_isolate(), buffer, sizeof buffer);
generate(&masm, key);
@@ -230,7 +230,7 @@ static uint32_t PseudoRandom(uint32_t i, uint32_t j) {
TEST(StringHash) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(v8::Context::New(isolate));
@@ -251,7 +251,7 @@ TEST(StringHash) {
TEST(NumberHash) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(v8::Context::New(isolate));
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 0cf9cdaedf..db2243a3f0 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -31,6 +31,7 @@
#include "v8.h"
+#include "allocation-tracker.h"
#include "cctest.h"
#include "hashmap.h"
#include "heap-profiler.h"
@@ -39,6 +40,12 @@
#include "utils-inl.h"
#include "../include/v8-profiler.h"
+using i::AllocationTraceNode;
+using i::AllocationTraceTree;
+using i::AllocationTracker;
+using i::HashMap;
+using i::Vector;
+
namespace {
class NamedEntriesDetector {
@@ -413,7 +420,7 @@ TEST(HeapSnapshotSlicedString) {
TEST(HeapSnapshotConsString) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->SetInternalFieldCount(1);
@@ -422,7 +429,7 @@ TEST(HeapSnapshotConsString) {
v8::Handle<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
CHECK_EQ(1, global->InternalFieldCount());
- i::Factory* factory = i::Isolate::Current()->factory();
+ i::Factory* factory = CcTest::i_isolate()->factory();
i::Handle<i::String> first =
factory->NewStringFromAscii(i::CStrVector("0123456789"));
i::Handle<i::String> second =
@@ -456,7 +463,7 @@ TEST(HeapSnapshotConsString) {
TEST(HeapSnapshotInternalReferences) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->SetInternalFieldCount(2);
@@ -505,7 +512,7 @@ TEST(HeapSnapshotAddressReuse) {
CompileRun(
"for (var i = 0; i < 10000; ++i)\n"
" a[i] = new A();\n");
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
const v8::HeapSnapshot* snapshot2 =
heap_profiler->TakeHeapSnapshot(v8_str("snapshot2"));
@@ -549,7 +556,7 @@ TEST(HeapEntryIdsAndArrayShift) {
"for (var i = 0; i < 1; ++i)\n"
" a.shift();\n");
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
const v8::HeapSnapshot* snapshot2 =
heap_profiler->TakeHeapSnapshot(v8_str("s2"));
@@ -594,7 +601,7 @@ TEST(HeapEntryIdsAndGC) {
heap_profiler->TakeHeapSnapshot(s1_str);
CHECK(ValidateSnapshot(snapshot1));
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
const v8::HeapSnapshot* snapshot2 =
heap_profiler->TakeHeapSnapshot(s2_str);
@@ -901,7 +908,7 @@ TEST(HeapSnapshotObjectsStats) {
// We have to call GC 6 times. Otherwise the leftover garbage would be
// a source of flakiness.
for (int i = 0; i < 6; ++i) {
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
}
v8::SnapshotObjectId initial_id;
@@ -1482,7 +1489,7 @@ TEST(NoHandleLeaks) {
CompileRun("document = { URL:\"abcdefgh\" };");
v8::Handle<v8::String> name(v8_str("leakz"));
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
int count_before = i::HandleScope::NumberOfHandles(isolate);
heap_profiler->TakeHeapSnapshot(name);
int count_after = i::HandleScope::NumberOfHandles(isolate);
@@ -1738,7 +1745,7 @@ bool HasWeakEdge(const v8::HeapGraphNode* node) {
bool HasWeakGlobalHandle() {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("weaks"));
@@ -1800,7 +1807,7 @@ TEST(NoDebugObjectInSnapshot) {
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
- v8::internal::Isolate::Current()->debug()->Load();
+ CcTest::i_isolate()->debug()->Load();
CompileRun("foo = {};");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
@@ -2005,3 +2012,168 @@ TEST(JSFunctionHasCodeLink) {
GetProperty(foo_func, v8::HeapGraphEdge::kInternal, "code");
CHECK_NE(NULL, code);
}
+
+
+
+class HeapProfilerExtension : public v8::Extension {
+ public:
+ static const char* kName;
+ HeapProfilerExtension() : v8::Extension(kName, kSource) { }
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static void FindUntrackedObjects(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+ private:
+ static const char* kSource;
+};
+
+const char* HeapProfilerExtension::kName = "v8/heap-profiler";
+
+
+const char* HeapProfilerExtension::kSource =
+ "native function findUntrackedObjects();";
+
+
+v8::Handle<v8::FunctionTemplate> HeapProfilerExtension::GetNativeFunction(
+ v8::Handle<v8::String> name) {
+ if (name->Equals(v8::String::New("findUntrackedObjects"))) {
+ return v8::FunctionTemplate::New(
+ HeapProfilerExtension::FindUntrackedObjects);
+ } else {
+ CHECK(false);
+ return v8::Handle<v8::FunctionTemplate>();
+ }
+}
+
+
+void HeapProfilerExtension::FindUntrackedObjects(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ i::HeapProfiler* heap_profiler =
+ reinterpret_cast<i::HeapProfiler*>(args.GetIsolate()->GetHeapProfiler());
+ int untracked_objects = heap_profiler->FindUntrackedObjects();
+ args.GetReturnValue().Set(untracked_objects);
+ CHECK_EQ(0, untracked_objects);
+}
+
+
+static HeapProfilerExtension kHeapProfilerExtension;
+v8::DeclareExtension kHeapProfilerExtensionDeclaration(
+ &kHeapProfilerExtension);
+
+
+// This is an example of checking JS allocation tracking from within a test.
+TEST(HeapObjectsTracker) {
+ const char* extensions[] = { HeapProfilerExtension::kName };
+ v8::ExtensionConfiguration config(1, extensions);
+ LocalContext env(&config);
+ v8::HandleScope scope(env->GetIsolate());
+ HeapObjectsTracker tracker;
+ CompileRun("var a = 1.2");
+ CompileRun("var a = 1.2; var b = 1.0; var c = 1.0;");
+ CompileRun(
+ "var a = [];\n"
+ "for (var i = 0; i < 5; ++i)\n"
+ " a[i] = i;\n"
+ "findUntrackedObjects();\n"
+ "for (var i = 0; i < 3; ++i)\n"
+ " a.shift();\n"
+ "findUntrackedObjects();\n");
+}
+
+
+static const char* record_trace_tree_source =
+"var topFunctions = [];\n"
+"var global = this;\n"
+"function generateFunctions(width, depth) {\n"
+" var script = [];\n"
+" for (var i = 0; i < width; i++) {\n"
+" for (var j = 0; j < depth; j++) {\n"
+" script.push('function f_' + i + '_' + j + '(x) {\\n');\n"
+" script.push(' try {\\n');\n"
+" if (j < depth-2) {\n"
+" script.push(' return f_' + i + '_' + (j+1) + '(x+1);\\n');\n"
+" } else if (j == depth - 2) {\n"
+" script.push(' return new f_' + i + '_' + (depth - 1) + '();\\n');\n"
+" } else if (j == depth - 1) {\n"
+" script.push(' this.ts = Date.now();\\n');\n"
+" }\n"
+" script.push(' } catch (e) {}\\n');\n"
+" script.push('}\\n');\n"
+" \n"
+" }\n"
+" }\n"
+" var script = script.join('');\n"
+" // throw script;\n"
+" global.eval(script);\n"
+" for (var i = 0; i < width; i++) {\n"
+" topFunctions.push(this['f_' + i + '_0']);\n"
+" }\n"
+"}\n"
+"\n"
+"var width = 3;\n"
+"var depth = 3;\n"
+"generateFunctions(width, depth);\n"
+"var instances = [];\n"
+"function start() {\n"
+" for (var i = 0; i < width; i++) {\n"
+" instances.push(topFunctions[i](0));\n"
+" }\n"
+"}\n"
+"\n"
+"for (var i = 0; i < 100; i++) start();\n";
+
+
+static i::HeapSnapshot* ToInternal(const v8::HeapSnapshot* snapshot) {
+ return const_cast<i::HeapSnapshot*>(
+ reinterpret_cast<const i::HeapSnapshot*>(snapshot));
+}
+
+
+static AllocationTraceNode* FindNode(
+ AllocationTracker* tracker, const Vector<const char*>& names) {
+ AllocationTraceNode* node = tracker->trace_tree()->root();
+ for (int i = 0; node != NULL && i < names.length(); i++) {
+ const char* name = names[i];
+ Vector<AllocationTraceNode*> children = node->children();
+ node = NULL;
+ for (int j = 0; j < children.length(); j++) {
+ v8::SnapshotObjectId id = children[j]->function_id();
+ AllocationTracker::FunctionInfo* info = tracker->GetFunctionInfo(id);
+ if (info && strcmp(info->name, name) == 0) {
+ node = children[j];
+ break;
+ }
+ }
+ }
+ return node;
+}
+
+
+TEST(TrackHeapAllocations) {
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ LocalContext env;
+
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ heap_profiler->StartRecordingHeapAllocations();
+
+ CompileRun(record_trace_tree_source);
+
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot(
+ v8::String::New("Test"));
+ i::HeapSnapshotsCollection* collection = ToInternal(snapshot)->collection();
+ AllocationTracker* tracker = collection->allocation_tracker();
+ CHECK_NE(NULL, tracker);
+ // Resolve all function locations.
+ tracker->PrepareForSerialization();
+ // Print for better diagnostics in case of failure.
+ tracker->trace_tree()->Print(tracker);
+
+ const char* names[] =
+ { "(anonymous function)", "start", "f_0_0", "f_0_1", "f_0_2" };
+ AllocationTraceNode* node =
+ FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+ CHECK_NE(NULL, node);
+ CHECK_GE(node->allocation_count(), 100);
+ CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
+ heap_profiler->StopRecordingHeapAllocations();
+}
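
Stripped of its assertions, the allocation-tracking flow the new test drives is short; condensed from the test above (no new API, just the sequence):

heap_profiler->StartRecordingHeapAllocations();
CompileRun(record_trace_tree_source);            // JS that allocates a lot
const v8::HeapSnapshot* snapshot =
    heap_profiler->TakeHeapSnapshot(v8::String::New("Test"));
AllocationTracker* tracker =
    ToInternal(snapshot)->collection()->allocation_tracker();
tracker->PrepareForSerialization();              // resolve script positions
heap_profiler->StopRecordingHeapAllocations();

Everything allocated between Start and Stop is attributed to a node in tracker->trace_tree(), keyed by the JS call stack at allocation time; FindNode then walks that tree by function name.
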
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 9d74011fde..74c2b75811 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -42,8 +42,8 @@ using namespace v8::internal;
// Go through all incremental marking steps in one swoop.
static void SimulateIncrementalMarking() {
- MarkCompactCollector* collector = HEAP->mark_compact_collector();
- IncrementalMarking* marking = HEAP->incremental_marking();
+ MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
+ IncrementalMarking* marking = CcTest::heap()->incremental_marking();
if (collector->IsConcurrentSweepingInProgress()) {
collector->WaitUntilSweepingCompleted();
}
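
The rest of SimulateIncrementalMarking, outside this hunk, steps the marker to completion; approximately (a sketch of the unchanged tail, not a quote):

// Continuation: start marking if idle, then step until done.
if (marking->IsStopped()) marking->Start();
CHECK(marking->IsMarking());
while (!marking->IsComplete()) {
  marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
}
CHECK(marking->IsComplete());

The WaitUntilSweepingCompleted() guard keeps this simulation from racing the concurrent sweeper over the same pages.
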
@@ -62,9 +62,9 @@ static void SimulateIncrementalMarking() {
static void CheckMap(Map* map, int type, int instance_size) {
CHECK(map->IsHeapObject());
#ifdef DEBUG
- CHECK(HEAP->Contains(map));
+ CHECK(CcTest::heap()->Contains(map));
#endif
- CHECK_EQ(HEAP->meta_map(), map->map());
+ CHECK_EQ(CcTest::heap()->meta_map(), map->map());
CHECK_EQ(type, map->instance_type());
CHECK_EQ(instance_size, map->instance_size());
}
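
CheckMap leans on V8's self-describing object model: every heap object begins with a pointer to its Map, and the map of any Map is the meta map, which describes itself. The fixed point, stated directly:

Map* meta = CcTest::heap()->meta_map();
CHECK_EQ(meta, meta->map());  // the meta map is its own map
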
@@ -72,10 +72,11 @@ static void CheckMap(Map* map, int type, int instance_size) {
TEST(HeapMaps) {
CcTest::InitializeVM();
- CheckMap(HEAP->meta_map(), MAP_TYPE, Map::kSize);
- CheckMap(HEAP->heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
- CheckMap(HEAP->fixed_array_map(), FIXED_ARRAY_TYPE, kVariableSizeSentinel);
- CheckMap(HEAP->string_map(), STRING_TYPE, kVariableSizeSentinel);
+ Heap* heap = CcTest::heap();
+ CheckMap(heap->meta_map(), MAP_TYPE, Map::kSize);
+ CheckMap(heap->heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
+ CheckMap(heap->fixed_array_map(), FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+ CheckMap(heap->string_map(), STRING_TYPE, kVariableSizeSentinel);
}
@@ -99,7 +100,7 @@ static void CheckSmi(Isolate* isolate, int value, const char* string) {
static void CheckNumber(Isolate* isolate, double value, const char* string) {
- Object* obj = HEAP->NumberFromDouble(value)->ToObjectChecked();
+ Object* obj = CcTest::heap()->NumberFromDouble(value)->ToObjectChecked();
CHECK(obj->IsNumber());
bool exc;
Handle<Object> handle(obj, isolate);
@@ -148,7 +149,7 @@ static void CheckFindCodeObject(Isolate* isolate) {
TEST(HeapObjects) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
@@ -209,10 +210,9 @@ TEST(HeapObjects) {
CHECK(s->IsString());
CHECK_EQ(10, s->length());
- String* object_string = String::cast(heap->Object_string());
- CHECK(
- Isolate::Current()->context()->global_object()->HasLocalProperty(
- object_string));
+ Handle<String> object_string = Handle<String>::cast(factory->Object_string());
+ Handle<GlobalObject> global(CcTest::i_isolate()->context()->global_object());
+ CHECK(JSReceiver::HasLocalProperty(global, object_string));
// Check ToString for oddballs
CheckOddball(isolate, heap->true_value(), "true");
@@ -250,7 +250,7 @@ TEST(Tagging) {
TEST(GarbageCollection) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
@@ -258,10 +258,13 @@ TEST(GarbageCollection) {
// Check GC.
heap->CollectGarbage(NEW_SPACE);
+ Handle<GlobalObject> global(CcTest::i_isolate()->context()->global_object());
Handle<String> name = factory->InternalizeUtf8String("theFunction");
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
Handle<String> prop_namex = factory->InternalizeUtf8String("theSlotx");
Handle<String> obj_name = factory->InternalizeUtf8String("theObject");
+ Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
+ Handle<Smi> twenty_four(Smi::FromInt(24), isolate);
{
HandleScope inner_scope(isolate);
@@ -271,14 +274,11 @@ TEST(GarbageCollection) {
Handle<Map> initial_map =
factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
- Isolate::Current()->context()->global_object()->SetProperty(
- *name, *function, NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetProperty(global, name, function, NONE, kNonStrictMode);
// Allocate an object. Unrooted after leaving the scope.
Handle<JSObject> obj = factory->NewJSObject(function);
- obj->SetProperty(
- *prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
- obj->SetProperty(
- *prop_namex, Smi::FromInt(24), NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, prop_namex, twenty_four, NONE, kNonStrictMode);
CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
CHECK_EQ(Smi::FromInt(24), obj->GetProperty(*prop_namex));
@@ -287,10 +287,9 @@ TEST(GarbageCollection) {
heap->CollectGarbage(NEW_SPACE);
// Function should be alive.
- CHECK(Isolate::Current()->context()->global_object()->
- HasLocalProperty(*name));
+ CHECK(JSReceiver::HasLocalProperty(global, name));
// Check function is retained.
- Object* func_value = Isolate::Current()->context()->global_object()->
+ Object* func_value = CcTest::i_isolate()->context()->global_object()->
GetProperty(*name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(func_value));
@@ -299,20 +298,17 @@ TEST(GarbageCollection) {
HandleScope inner_scope(isolate);
// Allocate another object, make it reachable from global.
Handle<JSObject> obj = factory->NewJSObject(function);
- Isolate::Current()->context()->global_object()->SetProperty(
- *obj_name, *obj, NONE, kNonStrictMode)->ToObjectChecked();
- obj->SetProperty(
- *prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetProperty(global, obj_name, obj, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
}
// After gc, it should survive.
heap->CollectGarbage(NEW_SPACE);
- CHECK(Isolate::Current()->context()->global_object()->
- HasLocalProperty(*obj_name));
- CHECK(Isolate::Current()->context()->global_object()->
+ CHECK(JSReceiver::HasLocalProperty(global, obj_name));
+ CHECK(CcTest::i_isolate()->context()->global_object()->
GetProperty(*obj_name)->ToObjectChecked()->IsJSObject());
- Object* obj = Isolate::Current()->context()->global_object()->
+ Object* obj = CcTest::i_isolate()->context()->global_object()->
GetProperty(*obj_name)->ToObjectChecked();
JSObject* js_obj = JSObject::cast(obj);
CHECK_EQ(Smi::FromInt(23), js_obj->GetProperty(*prop_name));
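
The SetProperty rewrites in this hunk are the patch's other recurring theme: raw-pointer member calls returning MaybeObject give way to handlified static JSReceiver methods, which stay valid across the GCs this test deliberately triggers. The pattern in isolation:

// Before (sketch): raw receiver, raw Smi, manual failure check.
// obj->SetProperty(*name, Smi::FromInt(23), NONE, kNonStrictMode)
//     ->ToObjectChecked();
// After: everything travels in handles, so a GC in the middle is harmless.
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
JSReceiver::SetProperty(obj, name, twenty_three, NONE, kNonStrictMode);
CHECK(JSReceiver::HasLocalProperty(obj, name));
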
@@ -343,7 +339,7 @@ TEST(String) {
TEST(LocalHandles) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
@@ -355,7 +351,7 @@ TEST(LocalHandles) {
TEST(GlobalHandles) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
GlobalHandles* global_handles = isolate->global_handles();
@@ -408,7 +404,7 @@ static void TestWeakGlobalHandleCallback(v8::Isolate* isolate,
TEST(WeakGlobalHandlesScavenge) {
i::FLAG_stress_compaction = false;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
GlobalHandles* global_handles = isolate->global_handles();
@@ -449,7 +445,7 @@ TEST(WeakGlobalHandlesScavenge) {
TEST(WeakGlobalHandlesMark) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
GlobalHandles* global_handles = isolate->global_handles();
@@ -495,7 +491,7 @@ TEST(WeakGlobalHandlesMark) {
TEST(DeleteWeakGlobalHandle) {
i::FLAG_stress_compaction = false;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
GlobalHandles* global_handles = isolate->global_handles();
@@ -594,12 +590,12 @@ static const char* not_so_random_string_table[] = {
static void CheckInternalizedStrings(const char** strings) {
for (const char* string = *strings; *strings != 0; string = *strings++) {
Object* a;
- MaybeObject* maybe_a = HEAP->InternalizeUtf8String(string);
+ MaybeObject* maybe_a = CcTest::heap()->InternalizeUtf8String(string);
// InternalizeUtf8String may return a failure if a GC is needed.
if (!maybe_a->ToObject(&a)) continue;
CHECK(a->IsInternalizedString());
Object* b;
- MaybeObject* maybe_b = HEAP->InternalizeUtf8String(string);
+ MaybeObject* maybe_b = CcTest::heap()->InternalizeUtf8String(string);
if (!maybe_b->ToObject(&b)) continue;
CHECK_EQ(b, a);
CHECK(String::cast(b)->IsUtf8EqualTo(CStrVector(string)));
@@ -617,7 +613,7 @@ TEST(StringTable) {
TEST(FunctionAllocation) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
@@ -628,26 +624,28 @@ TEST(FunctionAllocation) {
factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
+ Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
+ Handle<Smi> twenty_four(Smi::FromInt(24), isolate);
+
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
Handle<JSObject> obj = factory->NewJSObject(function);
- obj->SetProperty(
- *prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
// Check that we can add properties to function objects.
- function->SetProperty(
- *prop_name, Smi::FromInt(24), NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetProperty(function, prop_name, twenty_four, NONE,
+ kNonStrictMode);
CHECK_EQ(Smi::FromInt(24), function->GetProperty(*prop_name));
}
TEST(ObjectProperties) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
- String* object_string = String::cast(HEAP->Object_string());
- Object* raw_object = Isolate::Current()->context()->global_object()->
+ String* object_string = String::cast(CcTest::heap()->Object_string());
+ Object* raw_object = CcTest::i_isolate()->context()->global_object()->
GetProperty(object_string)->ToObjectChecked();
JSFunction* object_function = JSFunction::cast(raw_object);
Handle<JSFunction> constructor(object_function);
@@ -655,69 +653,65 @@ TEST(ObjectProperties) {
Handle<String> first = factory->InternalizeUtf8String("first");
Handle<String> second = factory->InternalizeUtf8String("second");
+ Handle<Smi> one(Smi::FromInt(1), isolate);
+ Handle<Smi> two(Smi::FromInt(2), isolate);
+
// check for empty
- CHECK(!obj->HasLocalProperty(*first));
+ CHECK(!JSReceiver::HasLocalProperty(obj, first));
// add first
- obj->SetProperty(
- *first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
- CHECK(obj->HasLocalProperty(*first));
+ JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
+ CHECK(JSReceiver::HasLocalProperty(obj, first));
// delete first
JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION);
- CHECK(!obj->HasLocalProperty(*first));
+ CHECK(!JSReceiver::HasLocalProperty(obj, first));
// add first and then second
- obj->SetProperty(
- *first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
- obj->SetProperty(
- *second, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked();
- CHECK(obj->HasLocalProperty(*first));
- CHECK(obj->HasLocalProperty(*second));
+ JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, second, two, NONE, kNonStrictMode);
+ CHECK(JSReceiver::HasLocalProperty(obj, first));
+ CHECK(JSReceiver::HasLocalProperty(obj, second));
// delete first and then second
JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION);
- CHECK(obj->HasLocalProperty(*second));
+ CHECK(JSReceiver::HasLocalProperty(obj, second));
JSReceiver::DeleteProperty(obj, second, JSReceiver::NORMAL_DELETION);
- CHECK(!obj->HasLocalProperty(*first));
- CHECK(!obj->HasLocalProperty(*second));
+ CHECK(!JSReceiver::HasLocalProperty(obj, first));
+ CHECK(!JSReceiver::HasLocalProperty(obj, second));
// add first and then second
- obj->SetProperty(
- *first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
- obj->SetProperty(
- *second, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked();
- CHECK(obj->HasLocalProperty(*first));
- CHECK(obj->HasLocalProperty(*second));
+ JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, second, two, NONE, kNonStrictMode);
+ CHECK(JSReceiver::HasLocalProperty(obj, first));
+ CHECK(JSReceiver::HasLocalProperty(obj, second));
// delete second and then first
JSReceiver::DeleteProperty(obj, second, JSReceiver::NORMAL_DELETION);
- CHECK(obj->HasLocalProperty(*first));
+ CHECK(JSReceiver::HasLocalProperty(obj, first));
JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION);
- CHECK(!obj->HasLocalProperty(*first));
- CHECK(!obj->HasLocalProperty(*second));
+ CHECK(!JSReceiver::HasLocalProperty(obj, first));
+ CHECK(!JSReceiver::HasLocalProperty(obj, second));
// check string and internalized string match
const char* string1 = "fisk";
Handle<String> s1 = factory->NewStringFromAscii(CStrVector(string1));
- obj->SetProperty(
- *s1, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetProperty(obj, s1, one, NONE, kNonStrictMode);
Handle<String> s1_string = factory->InternalizeUtf8String(string1);
- CHECK(obj->HasLocalProperty(*s1_string));
+ CHECK(JSReceiver::HasLocalProperty(obj, s1_string));
// check internalized string and string match
const char* string2 = "fugl";
Handle<String> s2_string = factory->InternalizeUtf8String(string2);
- obj->SetProperty(
- *s2_string, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetProperty(obj, s2_string, one, NONE, kNonStrictMode);
Handle<String> s2 = factory->NewStringFromAscii(CStrVector(string2));
- CHECK(obj->HasLocalProperty(*s2));
+ CHECK(JSReceiver::HasLocalProperty(obj, s2));
}
TEST(JSObjectMaps) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
@@ -732,8 +726,8 @@ TEST(JSObjectMaps) {
Handle<JSObject> obj = factory->NewJSObject(function);
// Set a property
- obj->SetProperty(
- *prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
+ Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
// Check the map has changed
@@ -743,12 +737,12 @@ TEST(JSObjectMaps) {
TEST(JSArray) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
Handle<String> name = factory->InternalizeUtf8String("Array");
- Object* raw_object = Isolate::Current()->context()->global_object()->
+ Object* raw_object = CcTest::i_isolate()->context()->global_object()->
GetProperty(*name)->ToObjectChecked();
Handle<JSFunction> function = Handle<JSFunction>(
JSFunction::cast(raw_object));
@@ -792,12 +786,12 @@ TEST(JSArray) {
TEST(JSObjectCopy) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
- String* object_string = String::cast(HEAP->Object_string());
- Object* raw_object = Isolate::Current()->context()->global_object()->
+ String* object_string = String::cast(CcTest::heap()->Object_string());
+ Object* raw_object = CcTest::i_isolate()->context()->global_object()->
GetProperty(object_string)->ToObjectChecked();
JSFunction* object_function = JSFunction::cast(raw_object);
Handle<JSFunction> constructor(object_function);
@@ -805,16 +799,17 @@ TEST(JSObjectCopy) {
Handle<String> first = factory->InternalizeUtf8String("first");
Handle<String> second = factory->InternalizeUtf8String("second");
- obj->SetProperty(
- *first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
- obj->SetProperty(
- *second, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked();
+ Handle<Smi> one(Smi::FromInt(1), isolate);
+ Handle<Smi> two(Smi::FromInt(2), isolate);
+
+ JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, second, two, NONE, kNonStrictMode);
obj->SetElement(0, *first, NONE, kNonStrictMode)->ToObjectChecked();
obj->SetElement(1, *second, NONE, kNonStrictMode)->ToObjectChecked();
// Make the clone.
- Handle<JSObject> clone = Copy(obj);
+ Handle<JSObject> clone = JSObject::Copy(obj);
CHECK(!clone.is_identical_to(obj));
CHECK_EQ(obj->GetElement(isolate, 0), clone->GetElement(isolate, 0));
@@ -824,10 +819,8 @@ TEST(JSObjectCopy) {
CHECK_EQ(obj->GetProperty(*second), clone->GetProperty(*second));
// Flip the values.
- clone->SetProperty(
- *first, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked();
- clone->SetProperty(
- *second, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetProperty(clone, first, two, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(clone, second, one, NONE, kNonStrictMode);
clone->SetElement(0, *second, NONE, kNonStrictMode)->ToObjectChecked();
clone->SetElement(1, *first, NONE, kNonStrictMode)->ToObjectChecked();
@@ -842,7 +835,7 @@ TEST(JSObjectCopy) {
TEST(StringAllocation) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
const unsigned char chars[] = { 0xe5, 0xa4, 0xa7 };
@@ -897,7 +890,7 @@ static int ObjectsFoundInHeap(Heap* heap, Handle<Object> objs[], int size) {
TEST(Iteration) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
@@ -931,7 +924,7 @@ TEST(Iteration) {
objs[next_objs_index++] = Handle<Map>(HeapObject::cast(*objs[0])->map());
CHECK_EQ(objs_count, next_objs_index);
- CHECK_EQ(objs_count, ObjectsFoundInHeap(HEAP, objs, objs_count));
+ CHECK_EQ(objs_count, ObjectsFoundInHeap(CcTest::heap(), objs, objs_count));
}
@@ -959,11 +952,12 @@ static int LenFromSize(int size) {
TEST(Regression39128) {
// Test case for crbug.com/39128.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
// Increase the chance of 'bump-the-pointer' allocation in old space.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
v8::HandleScope scope(CcTest::isolate());
@@ -973,7 +967,7 @@ TEST(Regression39128) {
// Step 1: prepare a map for the object. We add 1 inobject property to it.
Handle<JSFunction> object_ctor(
- Isolate::Current()->native_context()->object_function());
+ CcTest::i_isolate()->native_context()->object_function());
CHECK(object_ctor->has_initial_map());
Handle<Map> object_map(object_ctor->initial_map());
// Create a map with single inobject property.
@@ -989,12 +983,12 @@ TEST(Regression39128) {
int allocation_amount = Min(FixedArray::kMaxSize,
Page::kMaxNonCodeHeapObjectSize + kPointerSize);
int allocation_len = LenFromSize(allocation_amount);
- NewSpace* new_space = HEAP->new_space();
+ NewSpace* new_space = heap->new_space();
Address* top_addr = new_space->allocation_top_address();
Address* limit_addr = new_space->allocation_limit_address();
while ((*limit_addr - *top_addr) > allocation_amount) {
- CHECK(!HEAP->always_allocate());
- Object* array = HEAP->AllocateFixedArray(allocation_len)->ToObjectChecked();
+ CHECK(!heap->always_allocate());
+ Object* array = heap->AllocateFixedArray(allocation_len)->ToObjectChecked();
CHECK(!array->IsFailure());
CHECK(new_space->Contains(array));
}
@@ -1004,12 +998,12 @@ TEST(Regression39128) {
int fixed_array_len = LenFromSize(to_fill);
CHECK(fixed_array_len < FixedArray::kMaxLength);
- CHECK(!HEAP->always_allocate());
- Object* array = HEAP->AllocateFixedArray(fixed_array_len)->ToObjectChecked();
+ CHECK(!heap->always_allocate());
+ Object* array = heap->AllocateFixedArray(fixed_array_len)->ToObjectChecked();
CHECK(!array->IsFailure());
CHECK(new_space->Contains(array));
- Object* object = HEAP->AllocateJSObjectFromMap(*my_map)->ToObjectChecked();
+ Object* object = heap->AllocateJSObjectFromMap(*my_map)->ToObjectChecked();
CHECK(new_space->Contains(object));
JSObject* jsobject = JSObject::cast(object);
CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
@@ -1021,15 +1015,15 @@ TEST(Regression39128) {
// Step 4: clone jsobject, but force always allocate first to create a clone
// in old pointer space.
- Address old_pointer_space_top = HEAP->old_pointer_space()->top();
+ Address old_pointer_space_top = heap->old_pointer_space()->top();
AlwaysAllocateScope aa_scope;
- Object* clone_obj = HEAP->CopyJSObject(jsobject)->ToObjectChecked();
+ Object* clone_obj = heap->CopyJSObject(jsobject)->ToObjectChecked();
JSObject* clone = JSObject::cast(clone_obj);
if (clone->address() != old_pointer_space_top) {
// Alas, got allocated from free list, we cannot do checks.
return;
}
- CHECK(HEAP->old_pointer_space()->Contains(clone->address()));
+ CHECK(heap->old_pointer_space()->Contains(clone->address()));
}
@@ -1037,8 +1031,9 @@ TEST(TestCodeFlushing) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code) return;
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_optimize_for_size = false;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
const char* source = "function foo() {"
@@ -1055,21 +1050,86 @@ TEST(TestCodeFlushing) {
}
// Check function is compiled.
- Object* func_value = Isolate::Current()->context()->global_object()->
+ Object* func_value = CcTest::i_isolate()->context()->global_object()->
GetProperty(*foo_name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(func_value));
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CHECK(function->shared()->is_compiled());
+
+ // Simulate several GCs that use full marking.
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ }
+
+ // foo should no longer be in the compilation cache
+ CHECK(!function->shared()->is_compiled() || function->IsOptimized());
+ CHECK(!function->is_compiled() || function->IsOptimized());
+ // Call foo to get it recompiled.
+ CompileRun("foo()");
+ CHECK(function->shared()->is_compiled());
+ CHECK(function->is_compiled());
+}
+
+
+TEST(TestCodeFlushingPreAged) {
+ // If we do not flush code this test is invalid.
+ if (!FLAG_flush_code) return;
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_optimize_for_size = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ v8::HandleScope scope(CcTest::isolate());
+ const char* source = "function foo() {"
+ " var x = 42;"
+ " var y = 42;"
+ " var z = x + y;"
+ "};"
+ "foo()";
+ Handle<String> foo_name = factory->InternalizeUtf8String("foo");
+
+ // Compile foo, but don't run it.
+ { v8::HandleScope scope(CcTest::isolate());
+ CompileRun(source);
+ }
+
+ // Check function is compiled.
+ Object* func_value = Isolate::Current()->context()->global_object()->
+ GetProperty(*foo_name)->ToObjectChecked();
+ CHECK(func_value->IsJSFunction());
+ Handle<JSFunction> function(JSFunction::cast(func_value));
+ CHECK(function->shared()->is_compiled());
+
+ // The code has been run so will survive at least one GC.
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CHECK(function->shared()->is_compiled());
+
+ // The code was only run once, so it should be pre-aged and collected on the
+ // next GC.
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CHECK(!function->shared()->is_compiled() || function->IsOptimized());
+
+ // Execute the function again twice, and ensure it is reset to the young age.
+ { v8::HandleScope scope(CcTest::isolate());
+ CompileRun("foo();"
+ "foo();");
+ }
+
+ // The code will survive at least two GCs now that it is young again.
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use full marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
}
// foo should no longer be in the compilation cache
@@ -1086,8 +1146,9 @@ TEST(TestCodeFlushingIncremental) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_optimize_for_size = false;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
const char* source = "function foo() {"
@@ -1104,22 +1165,22 @@ TEST(TestCodeFlushingIncremental) {
}
// Check function is compiled.
- Object* func_value = Isolate::Current()->context()->global_object()->
+ Object* func_value = CcTest::i_isolate()->context()->global_object()->
GetProperty(*foo_name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(func_value));
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use incremental marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
SimulateIncrementalMarking();
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
}
CHECK(!function->shared()->is_compiled() || function->IsOptimized());
CHECK(!function->is_compiled() || function->IsOptimized());
@@ -1134,7 +1195,7 @@ TEST(TestCodeFlushingIncremental) {
for (int i = 0; i < kAgingThreshold; i++) {
SimulateIncrementalMarking();
if (!function->next_function_link()->IsUndefined()) break;
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
}
// Force optimization while incremental marking is active and while
@@ -1144,7 +1205,7 @@ TEST(TestCodeFlushingIncremental) {
}
// Simulate one final GC to make sure the candidate queue is sane.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CHECK(function->shared()->is_compiled() || !function->IsOptimized());
CHECK(function->is_compiled() || !function->IsOptimized());
}
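
TestCodeFlushingIncremental and the two variants that follow share one
skeleton: compile a function, age its code across repeated GC cycles, then
assert that the unoptimized code was flushed. A condensed sketch of that
skeleton, assuming CcTest, CompileRun and SimulateIncrementalMarking behave
exactly as used in the hunks above (an illustration, not part of the patch):

    TEST(CodeFlushingSketch) {
      // Flushing must be enabled, and --optimize-for-size must be off
      // because it changes the aging behaviour the tests depend on.
      if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
      i::FLAG_optimize_for_size = false;
      CcTest::InitializeVM();
      v8::HandleScope scope(CcTest::isolate());
      CompileRun("function foo() { return 42; } foo();");
      const int kAgingThreshold = 6;  // same constant the real tests use
      for (int i = 0; i < kAgingThreshold; i++) {
        SimulateIncrementalMarking();
        CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
      }
      // By now the unoptimized code for foo should have been flushed.
    }
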
@@ -1154,8 +1215,9 @@ TEST(TestCodeFlushingIncrementalScavenge) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_optimize_for_size = false;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
const char* source = "var foo = function() {"
@@ -1172,7 +1234,7 @@ TEST(TestCodeFlushingIncrementalScavenge) {
Handle<String> bar_name = factory->InternalizeUtf8String("bar");
// Perform one initial GC to enable code flushing.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope(CcTest::isolate());
@@ -1180,12 +1242,12 @@ TEST(TestCodeFlushingIncrementalScavenge) {
}
// Check functions are compiled.
- Object* func_value = Isolate::Current()->context()->global_object()->
+ Object* func_value = CcTest::i_isolate()->context()->global_object()->
GetProperty(*foo_name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(func_value));
CHECK(function->shared()->is_compiled());
- Object* func_value2 = Isolate::Current()->context()->global_object()->
+ Object* func_value2 = CcTest::i_isolate()->context()->global_object()->
GetProperty(*bar_name)->ToObjectChecked();
CHECK(func_value2->IsJSFunction());
Handle<JSFunction> function2(JSFunction::cast(func_value2));
@@ -1209,10 +1271,10 @@ TEST(TestCodeFlushingIncrementalScavenge) {
// perform a scavenge while incremental marking is still running.
SimulateIncrementalMarking();
*function2.location() = NULL;
- HEAP->CollectGarbage(NEW_SPACE, "test scavenge while marking");
+ CcTest::heap()->CollectGarbage(NEW_SPACE, "test scavenge while marking");
// Simulate one final GC to make sure the candidate queue is sane.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CHECK(!function->shared()->is_compiled() || function->IsOptimized());
CHECK(!function->is_compiled() || function->IsOptimized());
}
@@ -1222,8 +1284,9 @@ TEST(TestCodeFlushingIncrementalAbort) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_optimize_for_size = false;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
v8::HandleScope scope(CcTest::isolate());
@@ -1241,7 +1304,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
}
// Check function is compiled.
- Object* func_value = Isolate::Current()->context()->global_object()->
+ Object* func_value = CcTest::i_isolate()->context()->global_object()->
GetProperty(*foo_name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function(JSFunction::cast(func_value));
@@ -1287,7 +1350,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
// Count the number of native contexts in the weak list of native contexts.
int CountNativeContexts() {
int count = 0;
- Object* object = HEAP->native_contexts_list();
+ Object* object = CcTest::heap()->native_contexts_list();
while (!object->IsUndefined()) {
count++;
object = Context::cast(object)->get(Context::NEXT_CONTEXT_LINK);
@@ -1319,7 +1382,7 @@ TEST(TestInternalWeakLists) {
static const int kNumTestContexts = 10;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
v8::Handle<v8::Context> ctx[kNumTestContexts];
@@ -1328,7 +1391,7 @@ TEST(TestInternalWeakLists) {
// Create a number of global contexts which get linked together.
for (int i = 0; i < kNumTestContexts; i++) {
- ctx[i] = v8::Context::New(v8::Isolate::GetCurrent());
+ ctx[i] = v8::Context::New(CcTest::isolate());
// Collect garbage that might have been created by one of the
// installed extensions.
@@ -1367,7 +1430,7 @@ TEST(TestInternalWeakLists) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- HEAP->PerformScavenge();
+ CcTest::heap()->PerformScavenge();
CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctions(ctx[i]));
}
@@ -1379,41 +1442,41 @@ TEST(TestInternalWeakLists) {
// Get rid of f3 and f5 in the same way.
CompileRun("f3=null");
for (int j = 0; j < 10; j++) {
- HEAP->PerformScavenge();
+ CcTest::heap()->PerformScavenge();
CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
}
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
CompileRun("f5=null");
for (int j = 0; j < 10; j++) {
- HEAP->PerformScavenge();
+ CcTest::heap()->PerformScavenge();
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
}
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[i]));
ctx[i]->Exit();
}
// Force compilation cache cleanup.
- HEAP->NotifyContextDisposed();
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->NotifyContextDisposed();
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
// Dispose the native contexts one by one.
for (int i = 0; i < kNumTestContexts; i++) {
// TODO(dcarney): is there a better way to do this?
i::Object** unsafe = reinterpret_cast<i::Object**>(*ctx[i]);
- *unsafe = HEAP->undefined_value();
+ *unsafe = CcTest::heap()->undefined_value();
ctx[i].Clear();
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- HEAP->PerformScavenge();
+ CcTest::heap()->PerformScavenge();
CHECK_EQ(kNumTestContexts - i, CountNativeContexts());
}
// Mark compact handles the weak references.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CHECK_EQ(kNumTestContexts - i - 1, CountNativeContexts());
}
@@ -1462,7 +1525,7 @@ static int CountOptimizedUserFunctionsWithGC(v8::Handle<v8::Context> context,
TEST(TestInternalWeakListsTraverseWithGC) {
v8::V8::Initialize();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
static const int kNumTestContexts = 10;
@@ -1474,7 +1537,7 @@ TEST(TestInternalWeakListsTraverseWithGC) {
// Create a number of contexts and check the length of the weak list both
// with and without GCs while iterating the list.
for (int i = 0; i < kNumTestContexts; i++) {
- ctx[i] = v8::Context::New(v8::Isolate::GetCurrent());
+ ctx[i] = v8::Context::New(CcTest::isolate());
CHECK_EQ(i + 1, CountNativeContexts());
CHECK_EQ(i + 1, CountNativeContextsWithGC(isolate, i / 2 + 1));
}
@@ -1516,13 +1579,13 @@ TEST(TestSizeOfObjects) {
// Get initial heap size after several full GCs, which will stabilize
// the heap size and return with sweeping finished completely.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(HEAP->old_pointer_space()->IsLazySweepingComplete());
- int initial_size = static_cast<int>(HEAP->SizeOfObjects());
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ CHECK(CcTest::heap()->old_pointer_space()->IsLazySweepingComplete());
+ int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
{
// Allocate objects on several different old-space pages so that
@@ -1530,33 +1593,33 @@ TEST(TestSizeOfObjects) {
AlwaysAllocateScope always_allocate;
int filler_size = static_cast<int>(FixedArray::SizeFor(8192));
for (int i = 1; i <= 100; i++) {
- HEAP->AllocateFixedArray(8192, TENURED)->ToObjectChecked();
+ CcTest::heap()->AllocateFixedArray(8192, TENURED)->ToObjectChecked();
CHECK_EQ(initial_size + i * filler_size,
- static_cast<int>(HEAP->SizeOfObjects()));
+ static_cast<int>(CcTest::heap()->SizeOfObjects()));
}
}
// The heap size should go back to initial size after a full GC, even
// though sweeping didn't finish yet.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
// Normally sweeping would not be complete here, but there are no guarantees.
- CHECK_EQ(initial_size, static_cast<int>(HEAP->SizeOfObjects()));
+ CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
// Advancing the sweeper step-wise should not change the heap size.
- while (!HEAP->old_pointer_space()->IsLazySweepingComplete()) {
- HEAP->old_pointer_space()->AdvanceSweeper(KB);
- CHECK_EQ(initial_size, static_cast<int>(HEAP->SizeOfObjects()));
+ while (!CcTest::heap()->old_pointer_space()->IsLazySweepingComplete()) {
+ CcTest::heap()->old_pointer_space()->AdvanceSweeper(KB);
+ CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
}
}
TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
CcTest::InitializeVM();
- HEAP->EnsureHeapIsIterable();
- intptr_t size_of_objects_1 = HEAP->SizeOfObjects();
- HeapIterator iterator(HEAP);
+ CcTest::heap()->EnsureHeapIsIterable();
+ intptr_t size_of_objects_1 = CcTest::heap()->SizeOfObjects();
+ HeapIterator iterator(CcTest::heap());
intptr_t size_of_objects_2 = 0;
for (HeapObject* obj = iterator.next();
obj != NULL;
@@ -1605,10 +1668,11 @@ static void FillUpNewSpace(NewSpace* new_space) {
TEST(GrowAndShrinkNewSpace) {
CcTest::InitializeVM();
- NewSpace* new_space = HEAP->new_space();
+ Heap* heap = CcTest::heap();
+ NewSpace* new_space = heap->new_space();
- if (HEAP->ReservedSemiSpaceSize() == HEAP->InitialSemiSpaceSize() ||
- HEAP->MaxSemiSpaceSize() == HEAP->InitialSemiSpaceSize()) {
+ if (heap->ReservedSemiSpaceSize() == heap->InitialSemiSpaceSize() ||
+ heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
// The max size cannot exceed the reserved size, since semispaces must
// always be within the reserved space. We can't test new space growing and
// shrinking if the reserved size is the same as the minimum (initial) size.
@@ -1634,7 +1698,7 @@ TEST(GrowAndShrinkNewSpace) {
CHECK(old_capacity == new_capacity);
// Let the scavenger empty the new space.
- HEAP->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE);
CHECK_LE(new_space->Size(), old_capacity);
// Explicitly shrinking should halve the space capacity.
@@ -1655,9 +1719,9 @@ TEST(GrowAndShrinkNewSpace) {
TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
CcTest::InitializeVM();
-
- if (HEAP->ReservedSemiSpaceSize() == HEAP->InitialSemiSpaceSize() ||
- HEAP->MaxSemiSpaceSize() == HEAP->InitialSemiSpaceSize()) {
+ Heap* heap = CcTest::heap();
+ if (heap->ReservedSemiSpaceSize() == heap->InitialSemiSpaceSize() ||
+ heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
// The max size cannot exceed the reserved size, since semispaces must
// always be within the reserved space. We can't test new space growing and
// shrinking if the reserved size is the same as the minimum (initial) size.
@@ -1665,14 +1729,14 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
}
v8::HandleScope scope(CcTest::isolate());
- NewSpace* new_space = HEAP->new_space();
+ NewSpace* new_space = heap->new_space();
intptr_t old_capacity, new_capacity;
old_capacity = new_space->Capacity();
new_space->Grow();
new_capacity = new_space->Capacity();
CHECK(2 * old_capacity == new_capacity);
FillUpNewSpace(new_space);
- HEAP->CollectAllAvailableGarbage();
+ heap->CollectAllAvailableGarbage();
new_capacity = new_space->Capacity();
CHECK(old_capacity == new_capacity);
}
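
Both new-space tests above depend on the same invariant: Grow() doubles the
semispace capacity, and emptying the space lets it shrink back. A sketch of
that round trip, using only helpers visible in the hunks above:

    NewSpace* new_space = CcTest::heap()->new_space();
    intptr_t old_capacity = new_space->Capacity();
    new_space->Grow();                       // doubles the capacity
    CHECK(2 * old_capacity == new_space->Capacity());
    FillUpNewSpace(new_space);               // fill it with live objects
    CcTest::heap()->CollectAllAvailableGarbage();
    CHECK(old_capacity == new_space->Capacity());  // shrunk back down
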
@@ -1680,7 +1744,7 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
static int NumberOfGlobalObjects() {
int count = 0;
- HeapIterator iterator(HEAP);
+ HeapIterator iterator(CcTest::heap());
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsGlobalObject()) count++;
}
@@ -1692,7 +1756,7 @@ static int NumberOfGlobalObjects() {
// optimized code.
TEST(LeakNativeContextViaMap) {
i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer_scope(isolate);
v8::Persistent<v8::Context> ctx1p;
v8::Persistent<v8::Context> ctx2p;
@@ -1703,7 +1767,7 @@ TEST(LeakNativeContextViaMap) {
v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(4, NumberOfGlobalObjects());
{
@@ -1726,10 +1790,10 @@ TEST(LeakNativeContextViaMap) {
ctx1p.Dispose();
v8::V8::ContextDisposedNotification();
}
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
ctx2p.Dispose();
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1738,7 +1802,7 @@ TEST(LeakNativeContextViaMap) {
// optimized code.
TEST(LeakNativeContextViaFunction) {
i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer_scope(isolate);
v8::Persistent<v8::Context> ctx1p;
v8::Persistent<v8::Context> ctx2p;
@@ -1749,7 +1813,7 @@ TEST(LeakNativeContextViaFunction) {
v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(4, NumberOfGlobalObjects());
{
@@ -1772,17 +1836,17 @@ TEST(LeakNativeContextViaFunction) {
ctx1p.Dispose();
v8::V8::ContextDisposedNotification();
}
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
ctx2p.Dispose();
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
TEST(LeakNativeContextViaMapKeyed) {
i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer_scope(isolate);
v8::Persistent<v8::Context> ctx1p;
v8::Persistent<v8::Context> ctx2p;
@@ -1793,7 +1857,7 @@ TEST(LeakNativeContextViaMapKeyed) {
v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(4, NumberOfGlobalObjects());
{
@@ -1816,17 +1880,17 @@ TEST(LeakNativeContextViaMapKeyed) {
ctx1p.Dispose();
v8::V8::ContextDisposedNotification();
}
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
ctx2p.Dispose();
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
TEST(LeakNativeContextViaMapProto) {
i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer_scope(isolate);
v8::Persistent<v8::Context> ctx1p;
v8::Persistent<v8::Context> ctx2p;
@@ -1837,7 +1901,7 @@ TEST(LeakNativeContextViaMapProto) {
v8::Local<v8::Context>::New(isolate, ctx1p)->Enter();
}
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(4, NumberOfGlobalObjects());
{
@@ -1864,10 +1928,10 @@ TEST(LeakNativeContextViaMapProto) {
ctx1p.Dispose();
v8::V8::ContextDisposedNotification();
}
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(2, NumberOfGlobalObjects());
ctx2p.Dispose();
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1879,12 +1943,12 @@ TEST(InstanceOfStubWriteBarrier) {
#endif
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft()) return;
+ if (!CcTest::i_isolate()->use_crankshaft()) return;
if (i::FLAG_force_marking_deque_overflows) return;
- v8::HandleScope outer_scope(v8::Isolate::GetCurrent());
+ v8::HandleScope outer_scope(CcTest::isolate());
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
CompileRun(
"function foo () { }"
"function mkbar () { return new (new Function(\"\")) (); }"
@@ -1895,14 +1959,14 @@ TEST(InstanceOfStubWriteBarrier) {
"f(new foo()); g();");
}
- IncrementalMarking* marking = HEAP->incremental_marking();
+ IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Abort();
marking->Start();
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
CHECK(f->IsOptimized());
@@ -1916,21 +1980,21 @@ TEST(InstanceOfStubWriteBarrier) {
CHECK(marking->IsMarking());
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Handle<v8::Object> global = v8::Context::GetCurrent()->Global();
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Handle<v8::Object> global = CcTest::global();
v8::Handle<v8::Function> g =
v8::Handle<v8::Function>::Cast(global->Get(v8_str("g")));
g->Call(global, 0, NULL);
}
- HEAP->incremental_marking()->set_should_hurry(true);
- HEAP->CollectGarbage(OLD_POINTER_SPACE);
+ CcTest::heap()->incremental_marking()->set_should_hurry(true);
+ CcTest::heap()->CollectGarbage(OLD_POINTER_SPACE);
}
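
The test above drives incremental marking by hand rather than waiting for
the heuristics to trigger it, a pattern several later tests reuse. Roughly
(a sketch; step size and finalization are what the individual tests vary):

    IncrementalMarking* marking = CcTest::heap()->incremental_marking();
    marking->Abort();   // discard any marking already in progress
    marking->Start();   // begin a fresh incremental cycle
    // Mark in bounded steps; a very large step can finish marking
    // outright, as the final hunk in this file demonstrates.
    marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
    // A full GC finalizes whatever marking remains.
    CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
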
TEST(PrototypeTransitionClearing) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
@@ -1947,11 +2011,11 @@ TEST(PrototypeTransitionClearing) {
Handle<JSObject> baseObject =
v8::Utils::OpenHandle(
*v8::Handle<v8::Object>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("base"))));
+ CcTest::global()->Get(v8_str("base"))));
// Verify that only dead prototype transitions are cleared.
CHECK_EQ(10, baseObject->map()->NumberOfProtoTransitions());
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
const int transitions = 10 - 3;
CHECK_EQ(transitions, baseObject->map()->NumberOfProtoTransitions());
@@ -1967,7 +2031,7 @@ TEST(PrototypeTransitionClearing) {
// Make sure next prototype is placed on an old-space evacuation candidate.
Handle<JSObject> prototype;
- PagedSpace* space = HEAP->old_pointer_space();
+ PagedSpace* space = CcTest::heap()->old_pointer_space();
{
AlwaysAllocateScope always_allocate;
SimulateFullSpace(space);
@@ -1983,7 +2047,7 @@ TEST(PrototypeTransitionClearing) {
CHECK(space->LastPage()->Contains(prototype->address()));
JSObject::SetPrototype(baseObject, prototype, false);
CHECK(Map::GetPrototypeTransition(map, prototype)->IsMap());
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CHECK(Map::GetPrototypeTransition(map, prototype)->IsMap());
}
@@ -1996,11 +2060,11 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
#endif
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft()) return;
- v8::HandleScope outer_scope(v8::Isolate::GetCurrent());
+ if (!CcTest::i_isolate()->use_crankshaft()) return;
+ v8::HandleScope outer_scope(CcTest::isolate());
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
CompileRun(
"function f () {"
" var s = 0;"
@@ -2014,14 +2078,14 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
CHECK(f->IsOptimized());
- IncrementalMarking* marking = HEAP->incremental_marking();
+ IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Abort();
marking->Start();
- // The following two calls will increment HEAP->global_ic_age().
+ // The following two calls will increment CcTest::heap()->global_ic_age().
const int kLongIdlePauseInMs = 1000;
v8::V8::ContextDisposedNotification();
v8::V8::IdleNotification(kLongIdlePauseInMs);
@@ -2035,11 +2099,11 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
// guard interrupt. But here we didn't ask for that, and there is no
// JS code running to trigger the interrupt, so we explicitly finalize
// here.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags,
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags,
"Test finalizing incremental mark-sweep");
}
- CHECK_EQ(HEAP->global_ic_age(), f->shared()->ic_age());
+ CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
CHECK_EQ(0, f->shared()->opt_count());
CHECK_EQ(0, f->shared()->code()->profiler_ticks());
}
@@ -2053,7 +2117,7 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
#endif
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft()) return;
+ if (!CcTest::i_isolate()->use_crankshaft()) return;
v8::HandleScope outer_scope(CcTest::isolate());
{
@@ -2071,18 +2135,18 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
CHECK(f->IsOptimized());
- HEAP->incremental_marking()->Abort();
+ CcTest::heap()->incremental_marking()->Abort();
- // The following two calls will increment HEAP->global_ic_age().
+ // The following two calls will increment CcTest::heap()->global_ic_age().
// Since incremental marking is off, IdleNotification will do full GC.
const int kLongIdlePauseInMs = 1000;
v8::V8::ContextDisposedNotification();
v8::V8::IdleNotification(kLongIdlePauseInMs);
- CHECK_EQ(HEAP->global_ic_age(), f->shared()->ic_age());
+ CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
CHECK_EQ(0, f->shared()->opt_count());
CHECK_EQ(0, f->shared()->code()->profiler_ticks());
}
@@ -2092,11 +2156,11 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
TEST(OptimizedAllocationAlwaysInNewSpace) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- SimulateFullSpace(HEAP->new_space());
+ SimulateFullSpace(CcTest::heap()->new_space());
AlwaysAllocateScope always_allocate;
v8::Local<v8::Value> res = CompileRun(
"function c(x) {"
@@ -2114,17 +2178,17 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InNewSpace(*o));
+ CHECK(CcTest::heap()->InNewSpace(*o));
}
TEST(OptimizedPretenuringAllocationFolding) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- HEAP->SetNewSpaceHighPromotionModeActive(true);
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
"function DataObject() {"
@@ -2145,22 +2209,22 @@ TEST(OptimizedPretenuringAllocationFolding) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InOldDataSpace(o->RawFastPropertyAt(0)));
- CHECK(HEAP->InOldPointerSpace(o->RawFastPropertyAt(1)));
- CHECK(HEAP->InOldDataSpace(o->RawFastPropertyAt(2)));
- CHECK(HEAP->InOldPointerSpace(o->RawFastPropertyAt(3)));
- CHECK(HEAP->InOldDataSpace(o->RawFastPropertyAt(4)));
- CHECK(HEAP->InOldPointerSpace(o->RawFastPropertyAt(5)));
+ CHECK(CcTest::heap()->InOldDataSpace(o->RawFastPropertyAt(0)));
+ CHECK(CcTest::heap()->InOldPointerSpace(o->RawFastPropertyAt(1)));
+ CHECK(CcTest::heap()->InOldDataSpace(o->RawFastPropertyAt(2)));
+ CHECK(CcTest::heap()->InOldPointerSpace(o->RawFastPropertyAt(3)));
+ CHECK(CcTest::heap()->InOldDataSpace(o->RawFastPropertyAt(4)));
+ CHECK(CcTest::heap()->InOldPointerSpace(o->RawFastPropertyAt(5)));
}
TEST(OptimizedPretenuringAllocationFoldingBlocks) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- HEAP->SetNewSpaceHighPromotionModeActive(true);
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
"function DataObject() {"
@@ -2181,22 +2245,22 @@ TEST(OptimizedPretenuringAllocationFoldingBlocks) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InOldPointerSpace(o->RawFastPropertyAt(0)));
- CHECK(HEAP->InOldPointerSpace(o->RawFastPropertyAt(1)));
- CHECK(HEAP->InOldDataSpace(o->RawFastPropertyAt(2)));
- CHECK(HEAP->InOldDataSpace(o->RawFastPropertyAt(3)));
- CHECK(HEAP->InOldPointerSpace(o->RawFastPropertyAt(4)));
- CHECK(HEAP->InOldDataSpace(o->RawFastPropertyAt(5)));
+ CHECK(CcTest::heap()->InOldPointerSpace(o->RawFastPropertyAt(0)));
+ CHECK(CcTest::heap()->InOldPointerSpace(o->RawFastPropertyAt(1)));
+ CHECK(CcTest::heap()->InOldDataSpace(o->RawFastPropertyAt(2)));
+ CHECK(CcTest::heap()->InOldDataSpace(o->RawFastPropertyAt(3)));
+ CHECK(CcTest::heap()->InOldPointerSpace(o->RawFastPropertyAt(4)));
+ CHECK(CcTest::heap()->InOldDataSpace(o->RawFastPropertyAt(5)));
}
TEST(OptimizedPretenuringObjectArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- HEAP->SetNewSpaceHighPromotionModeActive(true);
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
"function f() {"
@@ -2210,18 +2274,18 @@ TEST(OptimizedPretenuringObjectArrayLiterals) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InOldPointerSpace(o->elements()));
- CHECK(HEAP->InOldPointerSpace(*o));
+ CHECK(CcTest::heap()->InOldPointerSpace(o->elements()));
+ CHECK(CcTest::heap()->InOldPointerSpace(*o));
}
TEST(OptimizedPretenuringMixedInObjectProperties) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- HEAP->SetNewSpaceHighPromotionModeActive(true);
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
"function f() {"
@@ -2235,24 +2299,24 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InOldPointerSpace(*o));
- CHECK(HEAP->InOldPointerSpace(o->RawFastPropertyAt(0)));
- CHECK(HEAP->InOldDataSpace(o->RawFastPropertyAt(1)));
+ CHECK(CcTest::heap()->InOldPointerSpace(*o));
+ CHECK(CcTest::heap()->InOldPointerSpace(o->RawFastPropertyAt(0)));
+ CHECK(CcTest::heap()->InOldDataSpace(o->RawFastPropertyAt(1)));
JSObject* inner_object = reinterpret_cast<JSObject*>(o->RawFastPropertyAt(0));
- CHECK(HEAP->InOldPointerSpace(inner_object));
- CHECK(HEAP->InOldDataSpace(inner_object->RawFastPropertyAt(0)));
- CHECK(HEAP->InOldPointerSpace(inner_object->RawFastPropertyAt(1)));
+ CHECK(CcTest::heap()->InOldPointerSpace(inner_object));
+ CHECK(CcTest::heap()->InOldDataSpace(inner_object->RawFastPropertyAt(0)));
+ CHECK(CcTest::heap()->InOldPointerSpace(inner_object->RawFastPropertyAt(1)));
}
TEST(OptimizedPretenuringDoubleArrayProperties) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- HEAP->SetNewSpaceHighPromotionModeActive(true);
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
"function f() {"
@@ -2266,18 +2330,18 @@ TEST(OptimizedPretenuringDoubleArrayProperties) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InOldPointerSpace(*o));
- CHECK(HEAP->InOldDataSpace(o->properties()));
+ CHECK(CcTest::heap()->InOldPointerSpace(*o));
+ CHECK(CcTest::heap()->InOldDataSpace(o->properties()));
}
TEST(OptimizedPretenuringdoubleArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- HEAP->SetNewSpaceHighPromotionModeActive(true);
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
"function f() {"
@@ -2291,18 +2355,18 @@ TEST(OptimizedPretenuringdoubleArrayLiterals) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InOldDataSpace(o->elements()));
- CHECK(HEAP->InOldPointerSpace(*o));
+ CHECK(CcTest::heap()->InOldDataSpace(o->elements()));
+ CHECK(CcTest::heap()->InOldPointerSpace(*o));
}
TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- HEAP->SetNewSpaceHighPromotionModeActive(true);
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
"function f() {"
@@ -2322,21 +2386,21 @@ TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InOldPointerSpace(*o));
- CHECK(HEAP->InOldPointerSpace(*int_array_handle));
- CHECK(HEAP->InOldPointerSpace(int_array_handle->elements()));
- CHECK(HEAP->InOldPointerSpace(*double_array_handle));
- CHECK(HEAP->InOldDataSpace(double_array_handle->elements()));
+ CHECK(CcTest::heap()->InOldPointerSpace(*o));
+ CHECK(CcTest::heap()->InOldPointerSpace(*int_array_handle));
+ CHECK(CcTest::heap()->InOldPointerSpace(int_array_handle->elements()));
+ CHECK(CcTest::heap()->InOldPointerSpace(*double_array_handle));
+ CHECK(CcTest::heap()->InOldDataSpace(double_array_handle->elements()));
}
TEST(OptimizedPretenuringNestedObjectLiterals) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- HEAP->SetNewSpaceHighPromotionModeActive(true);
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
"function f() {"
@@ -2356,21 +2420,21 @@ TEST(OptimizedPretenuringNestedObjectLiterals) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InOldPointerSpace(*o));
- CHECK(HEAP->InOldPointerSpace(*int_array_handle_1));
- CHECK(HEAP->InOldPointerSpace(int_array_handle_1->elements()));
- CHECK(HEAP->InOldPointerSpace(*int_array_handle_2));
- CHECK(HEAP->InOldPointerSpace(int_array_handle_2->elements()));
+ CHECK(CcTest::heap()->InOldPointerSpace(*o));
+ CHECK(CcTest::heap()->InOldPointerSpace(*int_array_handle_1));
+ CHECK(CcTest::heap()->InOldPointerSpace(int_array_handle_1->elements()));
+ CHECK(CcTest::heap()->InOldPointerSpace(*int_array_handle_2));
+ CHECK(CcTest::heap()->InOldPointerSpace(int_array_handle_2->elements()));
}
TEST(OptimizedPretenuringNestedDoubleLiterals) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- HEAP->SetNewSpaceHighPromotionModeActive(true);
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
v8::Local<v8::Value> res = CompileRun(
"function f() {"
@@ -2392,11 +2456,11 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InOldPointerSpace(*o));
- CHECK(HEAP->InOldPointerSpace(*double_array_handle_1));
- CHECK(HEAP->InOldDataSpace(double_array_handle_1->elements()));
- CHECK(HEAP->InOldPointerSpace(*double_array_handle_2));
- CHECK(HEAP->InOldDataSpace(double_array_handle_2->elements()));
+ CHECK(CcTest::heap()->InOldPointerSpace(*o));
+ CHECK(CcTest::heap()->InOldPointerSpace(*double_array_handle_1));
+ CHECK(CcTest::heap()->InOldDataSpace(double_array_handle_1->elements()));
+ CHECK(CcTest::heap()->InOldPointerSpace(*double_array_handle_2));
+ CHECK(CcTest::heap()->InOldDataSpace(double_array_handle_2->elements()));
}
@@ -2404,7 +2468,7 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
TEST(OptimizedAllocationArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
@@ -2423,7 +2487,7 @@ TEST(OptimizedAllocationArrayLiterals) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InNewSpace(o->elements()));
+ CHECK(CcTest::heap()->InNewSpace(o->elements()));
}
@@ -2431,10 +2495,10 @@ TEST(OptimizedPretenuringCallNew) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_pretenuring_call_new = true;
CcTest::InitializeVM();
- if (!i::Isolate::Current()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
- HEAP->SetNewSpaceHighPromotionModeActive(true);
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
AlwaysAllocateScope always_allocate;
v8::Local<v8::Value> res = CompileRun(
@@ -2448,7 +2512,7 @@ TEST(OptimizedPretenuringCallNew) {
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InOldPointerSpace(*o));
+ CHECK(CcTest::heap()->InOldPointerSpace(*o));
}
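
Each OptimizedPretenuring* test above follows one recipe: switch on the
high-promotion mode, run an optimized allocation site, then assert the
result was allocated directly in old space. A condensed sketch (the
%OptimizeFunctionOnNextCall intrinsic is assumed available since the tests
run with --allow-natives-syntax):

    CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
    v8::Local<v8::Value> res = CompileRun(
        "function f() { return {a: {}, b: 1.5}; }"
        "f(); f();"
        "%OptimizeFunctionOnNextCall(f);"
        "f();");
    Handle<JSObject> o =
        v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
    CHECK(CcTest::heap()->InOldPointerSpace(*o));  // pretenured
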
@@ -2480,7 +2544,7 @@ TEST(Regress1465) {
Handle<JSObject> root =
v8::Utils::OpenHandle(
*v8::Handle<v8::Object>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("root"))));
+ CcTest::global()->Get(v8_str("root"))));
// Count number of live transitions before marking.
int transitions_before = CountMapTransitions(root->map());
@@ -2488,7 +2552,7 @@ TEST(Regress1465) {
CHECK_EQ(transitions_count, transitions_before);
SimulateIncrementalMarking();
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
@@ -2522,15 +2586,15 @@ TEST(Regress2143a) {
"f(root);");
// This bug only triggers with aggressive IC clearing.
- HEAP->AgeInlineCaches();
+ CcTest::heap()->AgeInlineCaches();
// Explicitly request GC to perform final marking step and sweeping.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
Handle<JSObject> root =
v8::Utils::OpenHandle(
*v8::Handle<v8::Object>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("root"))));
+ CcTest::global()->Get(v8_str("root"))));
// The root object should be in a sane state.
CHECK(root->IsJSObject());
@@ -2566,15 +2630,15 @@ TEST(Regress2143b) {
"%DeoptimizeFunction(f);");
// This bug only triggers with aggressive IC clearing.
- HEAP->AgeInlineCaches();
+ CcTest::heap()->AgeInlineCaches();
// Explicitly request GC to perform final marking step and sweeping.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
Handle<JSObject> root =
v8::Utils::OpenHandle(
*v8::Handle<v8::Object>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("root"))));
+ CcTest::global()->Get(v8_str("root"))));
// The root object should be in a sane state.
CHECK(root->IsJSObject());
@@ -2588,13 +2652,14 @@ TEST(ReleaseOverReservedPages) {
i::FLAG_crankshaft = false;
i::FLAG_always_opt = false;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
v8::HandleScope scope(CcTest::isolate());
static const int number_of_test_pages = 20;
// Prepare many pages with low live-bytes count.
- PagedSpace* old_pointer_space = HEAP->old_pointer_space();
+ PagedSpace* old_pointer_space = heap->old_pointer_space();
CHECK_EQ(1, old_pointer_space->CountTotalPages());
for (int i = 0; i < number_of_test_pages; i++) {
AlwaysAllocateScope always_allocate;
@@ -2605,14 +2670,14 @@ TEST(ReleaseOverReservedPages) {
// Triggering one GC will cause a lot of garbage to be discovered but
// evenly spread across all allocated pages.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered for preparation");
+ heap->CollectAllGarbage(Heap::kNoGCFlags, "triggered for preparation");
CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
// Triggering subsequent GCs should cause at least half of the pages
// to be released to the OS after at most two cycles.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 1");
+ heap->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 1");
CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
- HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 2");
+ heap->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 2");
CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages() * 2);
// Triggering a last-resort GC should cause all pages to be released to the
@@ -2622,7 +2687,7 @@ TEST(ReleaseOverReservedPages) {
// first page should be small in order to reduce memory used when the VM
// boots, but if the 20 small arrays don't fit on the first page then that's
// an indication that it is too small.
- HEAP->CollectAllAvailableGarbage("triggered really hard");
+ heap->CollectAllAvailableGarbage("triggered really hard");
CHECK_EQ(1, old_pointer_space->CountTotalPages());
}
@@ -2630,10 +2695,10 @@ TEST(ReleaseOverReservedPages) {
TEST(Regress2237) {
i::FLAG_stress_compaction = false;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
- Handle<String> slice(HEAP->empty_string());
+ Handle<String> slice(CcTest::heap()->empty_string());
{
// Generate a parent that lives in new-space.
@@ -2641,20 +2706,20 @@ TEST(Regress2237) {
const char* c = "This text is long enough to trigger sliced strings.";
Handle<String> s = factory->NewStringFromAscii(CStrVector(c));
CHECK(s->IsSeqOneByteString());
- CHECK(HEAP->InNewSpace(*s));
+ CHECK(CcTest::heap()->InNewSpace(*s));
// Generate a sliced string that is based on the above parent and
// lives in old-space.
- SimulateFullSpace(HEAP->new_space());
+ SimulateFullSpace(CcTest::heap()->new_space());
AlwaysAllocateScope always_allocate;
Handle<String> t = factory->NewProperSubString(s, 5, 35);
CHECK(t->IsSlicedString());
- CHECK(!HEAP->InNewSpace(*t));
+ CHECK(!CcTest::heap()->InNewSpace(*t));
*slice.location() = *t.location();
}
CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
}
@@ -2669,7 +2734,7 @@ TEST(PrintSharedFunctionInfo) {
Handle<JSFunction> g =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("g"))));
+ CcTest::global()->Get(v8_str("g"))));
DisallowHeapAllocation no_allocation;
g->shared()->PrintLn();
@@ -2683,7 +2748,7 @@ TEST(Regress2211) {
v8::Handle<v8::String> value = v8_str("val string");
Smi* hash = Smi::FromInt(321);
- Heap* heap = Isolate::Current()->heap();
+ Heap* heap = CcTest::heap();
for (int i = 0; i < 2; i++) {
// Store identity hash first and common hidden property second.
@@ -2732,13 +2797,13 @@ TEST(IncrementalMarkingClearsTypeFeedbackCells) {
// Prepare function f that contains type feedback for closures
// originating from two different native contexts.
- v8::Context::GetCurrent()->Global()->Set(v8_str("fun1"), fun1);
- v8::Context::GetCurrent()->Global()->Set(v8_str("fun2"), fun2);
+ CcTest::global()->Set(v8_str("fun1"), fun1);
+ CcTest::global()->Set(v8_str("fun2"), fun2);
CompileRun("function f(a, b) { a(); b(); } f(fun1, fun2);");
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
Handle<TypeFeedbackCells> cells(TypeFeedbackInfo::cast(
f->shared()->code()->type_feedback_info())->type_feedback_cells());
@@ -2747,7 +2812,7 @@ TEST(IncrementalMarkingClearsTypeFeedbackCells) {
CHECK(cells->GetCell(1)->value()->IsJSFunction());
SimulateIncrementalMarking();
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CHECK_EQ(2, cells->CellCount());
CHECK(cells->GetCell(0)->value()->IsTheHole());
@@ -2783,13 +2848,13 @@ TEST(IncrementalMarkingPreservesMonomorhpicIC) {
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
CHECK(ic_before->ic_state() == MONOMORPHIC);
SimulateIncrementalMarking();
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
CHECK(ic_after->ic_state() == MONOMORPHIC);
@@ -2810,12 +2875,12 @@ TEST(IncrementalMarkingClearsMonomorhpicIC) {
// Prepare function f that contains a monomorphic IC for object
// originating from a different native context.
- v8::Context::GetCurrent()->Global()->Set(v8_str("obj1"), obj1);
+ CcTest::global()->Set(v8_str("obj1"), obj1);
CompileRun("function f(o) { return o.x; } f(obj1); f(obj1);");
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
CHECK(ic_before->ic_state() == MONOMORPHIC);
@@ -2823,10 +2888,10 @@ TEST(IncrementalMarkingClearsMonomorhpicIC) {
// Fire context dispose notification.
v8::V8::ContextDisposedNotification();
SimulateIncrementalMarking();
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
- CHECK(ic_after->ic_state() == UNINITIALIZED);
+ CHECK(IC::IsCleared(ic_after));
}
@@ -2850,13 +2915,13 @@ TEST(IncrementalMarkingClearsPolymorhpicIC) {
// Prepare function f that contains a polymorphic IC for objects
// originating from two different native contexts.
- v8::Context::GetCurrent()->Global()->Set(v8_str("obj1"), obj1);
- v8::Context::GetCurrent()->Global()->Set(v8_str("obj2"), obj2);
+ CcTest::global()->Set(v8_str("obj1"), obj1);
+ CcTest::global()->Set(v8_str("obj2"), obj2);
CompileRun("function f(o) { return o.x; } f(obj1); f(obj1); f(obj2);");
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
CHECK(ic_before->ic_state() == POLYMORPHIC);
@@ -2864,10 +2929,10 @@ TEST(IncrementalMarkingClearsPolymorhpicIC) {
// Fire context dispose notification.
v8::V8::ContextDisposedNotification();
SimulateIncrementalMarking();
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
- CHECK(ic_after->ic_state() == UNINITIALIZED);
+ CHECK(IC::IsCleared(ic_after));
}
@@ -2900,21 +2965,20 @@ void ReleaseStackTraceDataTest(const char* source, const char* accessor) {
// resource's callback is fired when the external string is GC'ed.
FLAG_use_ic = false; // ICs retain objects.
FLAG_concurrent_recompilation = false;
- CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
SourceResource* resource = new SourceResource(i::StrDup(source));
{
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::String> source_string = v8::String::NewExternal(resource);
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
v8::Script::Compile(source_string)->Run();
CHECK(!resource->IsDisposed());
}
- // HEAP->CollectAllAvailableGarbage();
+ // CcTest::heap()->CollectAllAvailableGarbage();
CHECK(!resource->IsDisposed());
CompileRun(accessor);
- HEAP->CollectAllAvailableGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage();
// External source has been released.
CHECK(resource->IsDisposed());
@@ -2923,6 +2987,7 @@ void ReleaseStackTraceDataTest(const char* source, const char* accessor) {
TEST(ReleaseStackTraceData) {
+ CcTest::InitializeVM();
static const char* source1 = "var error = null; "
/* Normal Error */ "try { "
" throw new Error(); "
@@ -2968,7 +3033,7 @@ TEST(ReleaseStackTraceData) {
TEST(Regression144230) {
i::FLAG_stress_compaction = false;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -3009,9 +3074,10 @@ TEST(Regression144230) {
// visited later, causing the CallIC to be cleared.
Handle<String> name = isolate->factory()->InternalizeUtf8String("call");
Handle<GlobalObject> global(isolate->context()->global_object());
+ Handle<Smi> zero(Smi::FromInt(0), isolate);
MaybeObject* maybe_call = global->GetProperty(*name);
JSFunction* call = JSFunction::cast(maybe_call->ToObjectChecked());
- USE(global->SetProperty(*name, Smi::FromInt(0), NONE, kNonStrictMode));
+ JSReceiver::SetProperty(global, name, zero, NONE, kNonStrictMode);
isolate->compilation_cache()->Clear();
call->shared()->set_ic_age(heap->global_ic_age() + 1);
Handle<Object> call_code(call->code(), isolate);
@@ -3022,7 +3088,7 @@ TEST(Regression144230) {
// Either heap verification caught the problem already or we go kaboom once
// the CallIC is executed the next time.
- USE(global->SetProperty(*name, *call_function, NONE, kNonStrictMode));
+ JSReceiver::SetProperty(global, name, call_function, NONE, kNonStrictMode);
CompileRun("call();");
}
@@ -3031,7 +3097,7 @@ TEST(Regress159140) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -3060,14 +3126,14 @@ TEST(Regress159140) {
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
CHECK(f->is_compiled());
CompileRun("f = null;");
Handle<JSFunction> g =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("g"))));
+ CcTest::global()->Get(v8_str("g"))));
CHECK(g->is_compiled());
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
@@ -3093,7 +3159,7 @@ TEST(Regress165495) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -3115,7 +3181,7 @@ TEST(Regress165495) {
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
CHECK(f->is_compiled());
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
@@ -3142,7 +3208,7 @@ TEST(Regress169209) {
i::FLAG_flush_code_incrementally = true;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -3163,7 +3229,7 @@ TEST(Regress169209) {
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
CHECK(f->is_compiled());
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
@@ -3184,7 +3250,7 @@ TEST(Regress169209) {
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("flushMe"))));
+ CcTest::global()->Get(v8_str("flushMe"))));
CHECK(f->is_compiled());
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
@@ -3228,7 +3294,7 @@ TEST(Regress169928) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_crankshaft = false;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
@@ -3254,17 +3320,17 @@ TEST(Regress169928) {
v8_str("fastliteralcase(mote, 2.5);");
v8::Local<v8::String> array_name = v8_str("mote");
- v8::Context::GetCurrent()->Global()->Set(array_name, v8::Int32::New(0));
+ CcTest::global()->Set(array_name, v8::Int32::New(0));
// First make sure we flip spaces
- HEAP->CollectGarbage(NEW_SPACE);
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
// Allocate the object.
Handle<FixedArray> array_data = factory->NewFixedArray(2, NOT_TENURED);
array_data->set(0, Smi::FromInt(1));
array_data->set(1, Smi::FromInt(2));
- AllocateAllButNBytes(HEAP->new_space(),
+ AllocateAllButNBytes(CcTest::heap()->new_space(),
JSArray::kSize + AllocationMemento::kSize +
kPointerSize);
@@ -3277,18 +3343,18 @@ TEST(Regress169928) {
// We need a filler the size of an AllocationMemento object, plus an extra
// pointer-sized fill value.
- MaybeObject* maybe_object = HEAP->AllocateRaw(
+ MaybeObject* maybe_object = CcTest::heap()->AllocateRaw(
AllocationMemento::kSize + kPointerSize, NEW_SPACE, OLD_POINTER_SPACE);
Object* obj = NULL;
CHECK(maybe_object->ToObject(&obj));
Address addr_obj = reinterpret_cast<Address>(
reinterpret_cast<byte*>(obj - kHeapObjectTag));
- HEAP->CreateFillerObjectAt(addr_obj,
+ CcTest::heap()->CreateFillerObjectAt(addr_obj,
AllocationMemento::kSize + kPointerSize);
// Give the array a name, making sure not to allocate strings.
v8::Handle<v8::Object> array_obj = v8::Utils::ToLocal(array);
- v8::Context::GetCurrent()->Global()->Set(array_name, array_obj);
+ CcTest::global()->Set(array_name, array_obj);
// This should crash with a protection violation if we are running a build
// with the bug.
@@ -3303,7 +3369,7 @@ TEST(Regress168801) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -3326,7 +3392,7 @@ TEST(Regress168801) {
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
CHECK(f->is_compiled());
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
@@ -3359,7 +3425,7 @@ TEST(Regress173458) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -3382,7 +3448,7 @@ TEST(Regress173458) {
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CcTest::global()->Get(v8_str("f"))));
CHECK(f->is_compiled());
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
@@ -3416,7 +3482,7 @@ class DummyVisitor : public ObjectVisitor {
TEST(DeferredHandles) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate));
v8::ImplementationUtilities::HandleScopeData* data =
@@ -3444,7 +3510,7 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
" for (var i = 0; i < n; i += 100) a[i] = i;"
"};"
"f(10 * 1024 * 1024);");
- IncrementalMarking* marking = HEAP->incremental_marking();
+ IncrementalMarking* marking = CcTest::heap()->incremental_marking();
if (marking->IsStopped()) marking->Start();
// This big step should be sufficient to mark the whole array.
marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
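
Nearly every hunk in this file is the same mechanical rewrite: the HEAP
macro and the Current() accessors are replaced by CcTest helpers, which pin
each test to the isolate the test driver created instead of whatever
isolate happens to be current. A hedged sketch of what those helpers are
assumed to wrap (their real definitions live in cctest.h, which this diff
does not show):

    static v8::Isolate* isolate() { return isolate_; }
    static i::Isolate* i_isolate() {
      return reinterpret_cast<i::Isolate*>(isolate());
    }
    static i::Heap* heap() { return i_isolate()->heap(); }
    static v8::Local<v8::Object> global() {
      return isolate()->GetCurrentContext()->Global();
    }
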
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index d02f4f46e8..65a001d4b3 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -97,7 +97,7 @@ void CompareStringsOneWay(const char* s1, const char* s2,
int expected_diff_parameter = -1) {
StringCompareInput input(s1, s2);
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
DiffChunkStruct* first_chunk;
ListDiffOutputWriter writer(&first_chunk, &zone);
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index a143d583fd..1094512276 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -183,7 +183,7 @@ class IsolateLockingThreadWithLocalContext : public JoinableThread {
v8::Locker locker(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
- LocalContext local_context;
+ LocalContext local_context(isolate_);
CHECK_EQ(isolate_, v8::internal::Isolate::Current());
CalcFibAndCheck();
}
@@ -267,7 +267,7 @@ class IsolateNestedLockingThread : public JoinableThread {
v8::Locker lock(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
- LocalContext local_context;
+ LocalContext local_context(isolate_);
{
v8::Locker another_lock(isolate_);
CalcFibAndCheck();
@@ -311,7 +311,7 @@ class SeparateIsolatesLocksNonexclusiveThread : public JoinableThread {
v8::Locker lock(isolate1_);
v8::Isolate::Scope isolate_scope(isolate1_);
v8::HandleScope handle_scope(isolate1_);
- LocalContext local_context;
+ LocalContext local_context(isolate1_);
IsolateLockingThreadWithLocalContext threadB(isolate2_);
threadB.Start();
@@ -545,7 +545,7 @@ class LockUnlockLockThread : public JoinableThread {
virtual void Run() {
v8::Locker lock1(isolate_);
CHECK(v8::Locker::IsLocked(isolate_));
- CHECK(!v8::Locker::IsLocked(CcTest::default_isolate()));
+ CHECK(!v8::Locker::IsLocked(CcTest::isolate()));
{
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
@@ -557,13 +557,13 @@ class LockUnlockLockThread : public JoinableThread {
{
v8::Unlocker unlock1(isolate_);
CHECK(!v8::Locker::IsLocked(isolate_));
- CHECK(!v8::Locker::IsLocked(CcTest::default_isolate()));
+ CHECK(!v8::Locker::IsLocked(CcTest::isolate()));
{
v8::Locker lock2(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
CHECK(v8::Locker::IsLocked(isolate_));
- CHECK(!v8::Locker::IsLocked(CcTest::default_isolate()));
+ CHECK(!v8::Locker::IsLocked(CcTest::isolate()));
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate_, context_);
v8::Context::Scope context_scope(context);
@@ -605,24 +605,26 @@ class LockUnlockLockDefaultIsolateThread : public JoinableThread {
public:
explicit LockUnlockLockDefaultIsolateThread(v8::Handle<v8::Context> context)
: JoinableThread("LockUnlockLockDefaultIsolateThread"),
- context_(CcTest::default_isolate(), context) {}
+ context_(CcTest::isolate(), context) {}
virtual void Run() {
- v8::Locker lock1(CcTest::default_isolate());
+ v8::Locker lock1(CcTest::isolate());
{
- v8::HandleScope handle_scope(CcTest::default_isolate());
+ v8::Isolate::Scope isolate_scope(CcTest::isolate());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::Local<v8::Context> context =
- v8::Local<v8::Context>::New(CcTest::default_isolate(), context_);
+ v8::Local<v8::Context>::New(CcTest::isolate(), context_);
v8::Context::Scope context_scope(context);
CalcFibAndCheck();
}
{
- v8::Unlocker unlock1(CcTest::default_isolate());
+ v8::Unlocker unlock1(CcTest::isolate());
{
- v8::Locker lock2(CcTest::default_isolate());
- v8::HandleScope handle_scope(CcTest::default_isolate());
+ v8::Locker lock2(CcTest::isolate());
+ v8::Isolate::Scope isolate_scope(CcTest::isolate());
+ v8::HandleScope handle_scope(CcTest::isolate());
v8::Local<v8::Context> context =
- v8::Local<v8::Context>::New(CcTest::default_isolate(), context_);
+ v8::Local<v8::Context>::New(CcTest::isolate(), context_);
v8::Context::Scope context_scope(context);
CalcFibAndCheck();
}
@@ -644,9 +646,10 @@ TEST(LockUnlockLockDefaultIsolateMultithreaded) {
Local<v8::Context> context;
i::List<JoinableThread*> threads(kNThreads);
{
- v8::Locker locker_(CcTest::default_isolate());
- v8::HandleScope handle_scope(CcTest::default_isolate());
- context = v8::Context::New(CcTest::default_isolate());
+ v8::Locker locker_(CcTest::isolate());
+ v8::Isolate::Scope isolate_scope(CcTest::isolate());
+ v8::HandleScope handle_scope(CcTest::isolate());
+ context = v8::Context::New(CcTest::isolate());
for (int i = 0; i < kNThreads; i++) {
threads.Add(new LockUnlockLockDefaultIsolateThread(context));
}
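
Beyond the rename from CcTest::default_isolate() to CcTest::isolate(), note what every updated thread body now does explicitly: LocalContext takes the isolate as a constructor argument, and a v8::Isolate::Scope is entered before any HandleScope, because with no default isolate nothing is entered implicitly anymore. A condensed sketch of the required nesting, assuming an already-created isolate and a persistent context:

// Sketch: the scope nesting the updated threads rely on. Order matters.
void RunScriptInIsolate(v8::Isolate* isolate,
                        v8::Persistent<v8::Context>& persistent_context) {
  v8::Locker locker(isolate);                 // 1. take the lock
  v8::Isolate::Scope isolate_scope(isolate);  // 2. enter the isolate
  v8::HandleScope handle_scope(isolate);      // 3. now handles are legal
  v8::Local<v8::Context> context =
      v8::Local<v8::Context>::New(isolate, persistent_context);
  v8::Context::Scope context_scope(context);  // 4. enter the context
  // ... CompileRun(...) and friends go here ...
}
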
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 09df19e2fc..4a0717d09d 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -72,7 +72,7 @@ static void DoTrace(Address fp) {
// sp is only used to define stack high bound
regs.sp =
reinterpret_cast<Address>(trace_env.sample) - 10240;
- trace_env.sample->Init(Isolate::Current(), regs);
+ trace_env.sample->Init(CcTest::i_isolate(), regs);
}
@@ -80,11 +80,11 @@ static void DoTrace(Address fp) {
// pure JS code is being executed
static void DoTraceHideCEntryFPAddress(Address fp) {
v8::internal::Address saved_c_frame_fp =
- *(Isolate::Current()->c_entry_fp_address());
+ *(CcTest::i_isolate()->c_entry_fp_address());
CHECK(saved_c_frame_fp);
- *(Isolate::Current()->c_entry_fp_address()) = 0;
+ *(CcTest::i_isolate()->c_entry_fp_address()) = 0;
DoTrace(fp);
- *(Isolate::Current()->c_entry_fp_address()) = saved_c_frame_fp;
+ *(CcTest::i_isolate()->c_entry_fp_address()) = saved_c_frame_fp;
}
@@ -156,8 +156,8 @@ void TraceExtension::JSTrace(const v8::FunctionCallbackInfo<v8::Value>& args) {
static Address GetJsEntrySp() {
- CHECK_NE(NULL, i::Isolate::Current()->thread_local_top());
- return i::Isolate::Current()->js_entry_sp();
+ CHECK_NE(NULL, CcTest::i_isolate()->thread_local_top());
+ return CcTest::i_isolate()->js_entry_sp();
}
@@ -187,8 +187,10 @@ static bool IsAddressWithinFuncCode(JSFunction* function, Address addr) {
}
-static bool IsAddressWithinFuncCode(const char* func_name, Address addr) {
- v8::Local<v8::Value> func = CcTest::env()->Global()->Get(v8_str(func_name));
+static bool IsAddressWithinFuncCode(v8::Local<v8::Context> context,
+ const char* func_name,
+ Address addr) {
+ v8::Local<v8::Value> func = context->Global()->Get(v8_str(func_name));
CHECK(func->IsFunction());
JSFunction* js_func = JSFunction::cast(*v8::Utils::OpenHandle(*func));
return IsAddressWithinFuncCode(js_func, addr);
@@ -225,19 +227,21 @@ static void construct_call(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Use the API to create a JSFunction object that calls the above C++ function.
-void CreateFramePointerGrabberConstructor(const char* constructor_name) {
+void CreateFramePointerGrabberConstructor(v8::Local<v8::Context> context,
+ const char* constructor_name) {
Local<v8::FunctionTemplate> constructor_template =
v8::FunctionTemplate::New(construct_call);
constructor_template->SetClassName(v8_str("FPGrabber"));
Local<Function> fun = constructor_template->GetFunction();
- CcTest::env()->Global()->Set(v8_str(constructor_name), fun);
+ context->Global()->Set(v8_str(constructor_name), fun);
}
// Creates a global function named 'func_name' that calls the tracing
// function 'trace_func_name' with an actual EBP register value,
// encoded as one or two Smis.
-static void CreateTraceCallerFunction(const char* func_name,
+static void CreateTraceCallerFunction(v8::Local<v8::Context> context,
+ const char* func_name,
const char* trace_func_name) {
i::EmbeddedVector<char, 256> trace_call_buf;
i::OS::SNPrintF(trace_call_buf,
@@ -249,7 +253,7 @@ static void CreateTraceCallerFunction(const char* func_name,
// Create the FPGrabber function, which grabs the caller's frame pointer
// when called as a constructor.
- CreateFramePointerGrabberConstructor("FPGrabber");
+ CreateFramePointerGrabberConstructor(context, "FPGrabber");
// Compile the script.
CompileRun(trace_call_buf.start());
@@ -267,11 +271,13 @@ TEST(CFromJSStackTrace) {
TickSample sample;
InitTraceEnv(&sample);
- CcTest::InitializeVM(TRACE_EXTENSION);
v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
+ v8::Context::Scope context_scope(context);
+
// Create global function JSFuncDoTrace which calls
// extension function trace() with the current frame pointer value.
- CreateTraceCallerFunction("JSFuncDoTrace", "trace");
+ CreateTraceCallerFunction(context, "JSFuncDoTrace", "trace");
Local<Value> result = CompileRun(
"function JSTrace() {"
" JSFuncDoTrace();"
@@ -294,8 +300,9 @@ TEST(CFromJSStackTrace) {
int base = 0;
CHECK_GT(sample.frames_count, base + 1);
- CHECK(IsAddressWithinFuncCode("JSFuncDoTrace", sample.stack[base + 0]));
- CHECK(IsAddressWithinFuncCode("JSTrace", sample.stack[base + 1]));
+ CHECK(IsAddressWithinFuncCode(
+ context, "JSFuncDoTrace", sample.stack[base + 0]));
+ CHECK(IsAddressWithinFuncCode(context, "JSTrace", sample.stack[base + 1]));
}
@@ -312,11 +319,13 @@ TEST(PureJSStackTrace) {
TickSample sample;
InitTraceEnv(&sample);
- CcTest::InitializeVM(TRACE_EXTENSION);
v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
+ v8::Context::Scope context_scope(context);
+
// Create global function JSFuncDoTrace which calls
// extension function js_trace() with the current frame pointer value.
- CreateTraceCallerFunction("JSFuncDoTrace", "js_trace");
+ CreateTraceCallerFunction(context, "JSFuncDoTrace", "js_trace");
Local<Value> result = CompileRun(
"function JSTrace() {"
" JSFuncDoTrace();"
@@ -343,8 +352,9 @@ TEST(PureJSStackTrace) {
// Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
int base = 0;
CHECK_GT(sample.frames_count, base + 1);
- CHECK(IsAddressWithinFuncCode("JSTrace", sample.stack[base + 0]));
- CHECK(IsAddressWithinFuncCode("OuterJSTrace", sample.stack[base + 1]));
+ CHECK(IsAddressWithinFuncCode(context, "JSTrace", sample.stack[base + 0]));
+ CHECK(IsAddressWithinFuncCode(
+ context, "OuterJSTrace", sample.stack[base + 1]));
}
@@ -379,15 +389,18 @@ static int CFunc(int depth) {
TEST(PureCStackTrace) {
TickSample sample;
InitTraceEnv(&sample);
- CcTest::InitializeVM(TRACE_EXTENSION);
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
+ v8::Context::Scope context_scope(context);
// Check that sampler doesn't crash
CHECK_EQ(10, CFunc(10));
}
TEST(JsEntrySp) {
- CcTest::InitializeVM(TRACE_EXTENSION);
v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
+ v8::Context::Scope context_scope(context);
CHECK_EQ(0, GetJsEntrySp());
CompileRun("a = 1; b = a + 1;");
CHECK_EQ(0, GetJsEntrySp());
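
The pattern in this file: instead of booting a shared VM with CcTest::InitializeVM(TRACE_EXTENSION), each test now builds its own context carrying the extension via CcTest::NewContext and enters it with a Context::Scope, and the helper functions receive that context explicitly rather than consulting a global CcTest::env(). A sketch of the per-test setup; SomeTracerTest is a hypothetical name standing in for the tests above:

// Sketch: per-test context creation, same shape as CFromJSStackTrace.
TEST(SomeTracerTest) {
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> context = CcTest::NewContext(TRACE_EXTENSION);
  v8::Context::Scope context_scope(context);
  // Helpers now take the context as a parameter:
  CreateTraceCallerFunction(context, "JSFuncDoTrace", "trace");
}
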
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index f752c36ccb..2cf2a77445 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -60,9 +60,9 @@ class ScopedLoggerInitializer {
temp_file_(NULL),
// Need to run this prior to creating the scope.
trick_to_run_init_flags_(init_flags_()),
- scope_(v8::Isolate::GetCurrent()),
- env_(v8::Context::New(v8::Isolate::GetCurrent())),
- logger_(i::Isolate::Current()->logger()) {
+ scope_(CcTest::isolate()),
+ env_(v8::Context::New(CcTest::isolate())),
+ logger_(CcTest::i_isolate()->logger()) {
env_->Enter();
}
@@ -91,6 +91,7 @@ class ScopedLoggerInitializer {
i::FLAG_log = true;
i::FLAG_prof = true;
i::FLAG_logfile = i::Log::kLogToTemporaryFile;
+ i::FLAG_logfile_per_isolate = false;
return false;
}
@@ -169,8 +170,8 @@ class LoopingJsThread : public LoopingThread {
: LoopingThread(isolate) { }
void RunLoop() {
v8::Locker locker;
- CHECK(i::Isolate::Current() != NULL);
- CHECK_GT(i::Isolate::Current()->thread_manager()->CurrentId(), 0);
+ CHECK(CcTest::i_isolate() != NULL);
+ CHECK_GT(CcTest::i_isolate()->thread_manager()->CurrentId(), 0);
SetV8ThreadId();
while (IsRunning()) {
v8::HandleScope scope;
@@ -197,8 +198,8 @@ class LoopingNonJsThread : public LoopingThread {
v8::Locker locker;
v8::Unlocker unlocker;
// Now thread has V8's id, but will not run VM code.
- CHECK(i::Isolate::Current() != NULL);
- CHECK_GT(i::Isolate::Current()->thread_manager()->CurrentId(), 0);
+ CHECK(CcTest::i_isolate() != NULL);
+ CHECK_GT(CcTest::i_isolate()->thread_manager()->CurrentId(), 0);
double i = 10;
SignalRunning();
while (IsRunning()) {
@@ -243,14 +244,14 @@ TEST(ProfMultipleThreads) {
TestSampler* sampler = NULL;
{
v8::Locker locker;
- sampler = new TestSampler(v8::internal::Isolate::Current());
+ sampler = new TestSampler(CcTest::i_isolate());
sampler->Start();
CHECK(sampler->IsActive());
}
- LoopingJsThread jsThread(v8::internal::Isolate::Current());
+ LoopingJsThread jsThread(CcTest::i_isolate());
jsThread.Start();
- LoopingNonJsThread nonJsThread(v8::internal::Isolate::Current());
+ LoopingNonJsThread nonJsThread(CcTest::i_isolate());
nonJsThread.Start();
CHECK(!sampler->WasSampleStackCalled());
@@ -299,8 +300,8 @@ class SimpleExternalString : public v8::String::ExternalStringResource {
} // namespace
TEST(Issue23768) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Handle<v8::Context> env = v8::Context::New(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Handle<v8::Context> env = v8::Context::New(CcTest::isolate());
env->Enter();
SimpleExternalString source_ext_str("(function ext() {})();");
@@ -317,7 +318,7 @@ TEST(Issue23768) {
i_source->set_resource(NULL);
// Must not crash.
- i::Isolate::Current()->logger()->LogCompiledFunctions();
+ CcTest::i_isolate()->logger()->LogCompiledFunctions();
}
@@ -330,7 +331,7 @@ TEST(LogCallbacks) {
Logger* logger = initialize_logger.logger();
v8::Local<v8::FunctionTemplate> obj =
- v8::Local<v8::FunctionTemplate>::New(v8::Isolate::GetCurrent(),
+ v8::Local<v8::FunctionTemplate>::New(CcTest::isolate(),
v8::FunctionTemplate::New());
obj->SetClassName(v8_str("Obj"));
v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
@@ -379,7 +380,7 @@ TEST(LogAccessorCallbacks) {
Logger* logger = initialize_logger.logger();
v8::Local<v8::FunctionTemplate> obj =
- v8::Local<v8::FunctionTemplate>::New(v8::Isolate::GetCurrent(),
+ v8::Local<v8::FunctionTemplate>::New(CcTest::isolate(),
v8::FunctionTemplate::New());
obj->SetClassName(v8_str("Obj"));
v8::Handle<v8::ObjectTemplate> inst = obj->InstanceTemplate();
@@ -439,7 +440,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
" (function a(j) { return function b() { return j; } })(100);\n"
"})(this);");
logger->StopProfiler();
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
logger->StringEvent("test-logging-done", "");
// Iterate heap to find compiled functions, will write to log.
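
Two details in the logger changes are easy to miss. First, ScopedLoggerInitializer runs init_flags_() from a member initializer so that the flags are set before the scope and context members construct; C++ initializes members in declaration order. Second, the tests pin FLAG_logfile_per_isolate off so the temporary-logfile machinery produces a single predictable file rather than one per isolate. A condensed sketch of that ordering, with names shortened for illustration:

// Sketch: member-initializer ordering in the logger fixture.
class LoggerFixture {
 public:
  LoggerFixture()
      : trick_to_run_init_flags_(InitFlags()),   // runs first
        scope_(CcTest::isolate()),               // scopes see flags set
        env_(v8::Context::New(CcTest::isolate())) {}

 private:
  static bool InitFlags() {
    i::FLAG_log = true;
    i::FLAG_prof = true;
    i::FLAG_logfile = i::Log::kLogToTemporaryFile;
    i::FLAG_logfile_per_isolate = false;  // keep one log file per process
    return false;
  }

  bool trick_to_run_init_flags_;  // declared before the scopes on purpose
  v8::HandleScope scope_;
  v8::Handle<v8::Context> env_;
};
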
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
new file mode 100644
index 0000000000..77f7abbd44
--- /dev/null
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -0,0 +1,136 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+#include "macro-assembler.h"
+#include "arm/macro-assembler-arm.h"
+#include "arm/simulator-arm.h"
+#include "cctest.h"
+
+
+using namespace v8::internal;
+
+typedef void* (*F)(int x, int y, int p2, int p3, int p4);
+
+#define __ masm->
+
+
+static byte to_non_zero(int n) {
+ return static_cast<unsigned>(n) % 255 + 1;
+}
+
+
+static bool all_zeroes(const byte* beg, const byte* end) {
+ CHECK(beg);
+ CHECK(beg <= end);
+ while (beg < end) {
+ if (*beg++ != 0)
+ return false;
+ }
+ return true;
+}
+
+
+TEST(CopyBytes) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+
+ const int data_size = 1 * KB;
+ size_t act_size;
+
+ // Allocate two blocks to copy data between.
+ byte* src_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+ CHECK(src_buffer);
+ CHECK(act_size >= static_cast<size_t>(data_size));
+ byte* dest_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+ CHECK(dest_buffer);
+ CHECK(act_size >= static_cast<size_t>(data_size));
+
+ // Storage for R0 and R1.
+ byte* r0_;
+ byte* r1_;
+
+ MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler* masm = &assembler;
+
+ // Code to be generated: The stuff in CopyBytes followed by a store of R0 and
+ // R1, respectively.
+ __ CopyBytes(r0, r1, r2, r3);
+ __ mov(r2, Operand(reinterpret_cast<int>(&r0_)));
+ __ mov(r3, Operand(reinterpret_cast<int>(&r1_)));
+ __ str(r0, MemOperand(r2));
+ __ str(r1, MemOperand(r3));
+ __ bx(lr);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+
+ F f = FUNCTION_CAST<F>(Code::cast(code)->entry());
+
+ // Initialise source data with non-zero bytes.
+ for (int i = 0; i < data_size; i++) {
+ src_buffer[i] = to_non_zero(i);
+ }
+
+ const int fuzz = 11;
+
+ for (int size = 0; size < 600; size++) {
+ for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
+ for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
+ memset(dest_buffer, 0, data_size);
+ CHECK(dest + size < dest_buffer + data_size);
+ (void) CALL_GENERATED_CODE(f, reinterpret_cast<int>(src),
+ reinterpret_cast<int>(dest), size, 0, 0);
+ // R0 and R1 should point at the first byte after the copied data.
+ CHECK_EQ(src + size, r0_);
+ CHECK_EQ(dest + size, r1_);
+ // Check that we haven't written outside the target area.
+ CHECK(all_zeroes(dest_buffer, dest));
+ CHECK(all_zeroes(dest + size, dest_buffer + data_size));
+ // Check the target area.
+ CHECK_EQ(0, memcmp(src, dest, size));
+ }
+ }
+ }
+
+ // Check that the source data hasn't been clobbered.
+ for (int i = 0; i < data_size; i++) {
+ CHECK(src_buffer[i] == to_non_zero(i));
+ }
+}
+
+
+
+#undef __
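
The new ARM test follows cctest's standard generated-code recipe: assemble into a MacroAssembler, materialize the buffer as a Code object, cast the entry point to a C function type, and invoke it through CALL_GENERATED_CODE, which runs the ARM simulator on non-ARM hosts and is a direct call on real hardware. A sketch of the invocation step in isolation, with F and f as in the test above:

// Sketch: calling a freshly assembled stub; on a simulator build this
// routes through the ARM simulator rather than executing natively.
void CallCopyBytes(F f, const byte* src, byte* dest, int size) {
  (void) CALL_GENERATED_CODE(f, reinterpret_cast<int>(src),
                             reinterpret_cast<int>(dest), size, 0, 0);
}
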
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
new file mode 100644
index 0000000000..b200949679
--- /dev/null
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -0,0 +1,136 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+#include "macro-assembler.h"
+#include "mips/macro-assembler-mips.h"
+#include "mips/simulator-mips.h"
+#include "cctest.h"
+
+
+using namespace v8::internal;
+
+typedef void* (*F)(int x, int y, int p2, int p3, int p4);
+
+#define __ masm->
+
+
+static byte to_non_zero(int n) {
+ return static_cast<unsigned>(n) % 255 + 1;
+}
+
+
+static bool all_zeroes(const byte* beg, const byte* end) {
+ CHECK(beg);
+ CHECK(beg <= end);
+ while (beg < end) {
+ if (*beg++ != 0)
+ return false;
+ }
+ return true;
+}
+
+
+TEST(CopyBytes) {
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+
+ const int data_size = 1 * KB;
+ size_t act_size;
+
+ // Allocate two blocks to copy data between.
+ byte* src_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+ CHECK(src_buffer);
+ CHECK(act_size >= static_cast<size_t>(data_size));
+ byte* dest_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+ CHECK(dest_buffer);
+ CHECK(act_size >= static_cast<size_t>(data_size));
+
+ // Storage for a0 and a1.
+ byte* a0_;
+ byte* a1_;
+
+ MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler* masm = &assembler;
+
+ // Code to be generated: The stuff in CopyBytes followed by a store of a0 and
+ // a1, respectively.
+ __ CopyBytes(a0, a1, a2, a3);
+ __ li(a2, Operand(reinterpret_cast<int>(&a0_)));
+ __ li(a3, Operand(reinterpret_cast<int>(&a1_)));
+ __ sw(a0, MemOperand(a2));
+ __ jr(ra);
+ __ sw(a1, MemOperand(a3));
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+
+ ::F f = FUNCTION_CAST< ::F>(Code::cast(code)->entry());
+
+ // Initialise source data with non-zero bytes.
+ for (int i = 0; i < data_size; i++) {
+ src_buffer[i] = to_non_zero(i);
+ }
+
+ const int fuzz = 11;
+
+ for (int size = 0; size < 600; size++) {
+ for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
+ for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
+ memset(dest_buffer, 0, data_size);
+ CHECK(dest + size < dest_buffer + data_size);
+ (void) CALL_GENERATED_CODE(f, reinterpret_cast<int>(src),
+ reinterpret_cast<int>(dest), size, 0, 0);
+ // a0 and a1 should point at the first byte after the copied data.
+ CHECK_EQ(src + size, a0_);
+ CHECK_EQ(dest + size, a1_);
+ // Check that we haven't written outside the target area.
+ CHECK(all_zeroes(dest_buffer, dest));
+ CHECK(all_zeroes(dest + size, dest_buffer + data_size));
+ // Check the target area.
+ CHECK_EQ(0, memcmp(src, dest, size));
+ }
+ }
+ }
+
+ // Check that the source data hasn't been clobbered.
+ for (int i = 0; i < data_size; i++) {
+ CHECK(src_buffer[i] == to_non_zero(i));
+ }
+}
+
+
+
+#undef __
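
One MIPS-specific wrinkle in the otherwise identical port: the second store is emitted after jr ra on purpose. MIPS executes the instruction following a jump in the branch delay slot, so the store to a3 still runs before control actually returns to the caller:

// Sketch: the epilogue above, with the delay slot annotated.
__ sw(a0, MemOperand(a2));  // store a0
__ jr(ra);                  // jump back to the caller...
__ sw(a1, MemOperand(a3));  // ...executed in the branch delay slot
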
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index a2070a5ea8..61914b58c3 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -47,6 +47,7 @@ using v8::internal::MacroAssembler;
using v8::internal::OS;
using v8::internal::Operand;
using v8::internal::RelocInfo;
+using v8::internal::Representation;
using v8::internal::Smi;
using v8::internal::SmiIndex;
using v8::internal::byte;
@@ -141,8 +142,8 @@ TEST(Smi) {
static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
__ movl(rax, Immediate(id));
- __ Move(rcx, Smi::FromInt(0));
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
+ __ Move(rcx, value);
+ __ Set(rdx, reinterpret_cast<intptr_t>(value));
__ cmpq(rcx, rdx);
__ j(not_equal, exit);
}
@@ -157,7 +158,7 @@ TEST(SmiMove) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
@@ -246,7 +247,7 @@ TEST(SmiCompare) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -297,7 +298,7 @@ TEST(Integer32ToSmi) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -426,7 +427,7 @@ TEST(Integer64PlusConstantToSmi) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -471,7 +472,7 @@ TEST(SmiCheck) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -720,7 +721,7 @@ TEST(SmiNeg) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -751,8 +752,6 @@ TEST(SmiNeg) {
}
-
-
static void SmiAddTest(MacroAssembler* masm,
Label* exit,
int id,
@@ -802,15 +801,122 @@ static void SmiAddTest(MacroAssembler* masm,
}
+static void SmiAddOverflowTest(MacroAssembler* masm,
+ Label* exit,
+ int id,
+ int x) {
+ // Adds a Smi to x so that the addition overflows.
+ ASSERT(x != 0); // Can't overflow by adding a Smi.
+ int y_max = (x > 0) ? (Smi::kMaxValue + 0) : (Smi::kMinValue - x - 1);
+ int y_min = (x > 0) ? (Smi::kMaxValue - x + 1) : (Smi::kMinValue + 0);
+
+ __ movl(rax, Immediate(id));
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx); // Store original Smi value of x in r11.
+ __ Move(rdx, Smi::FromInt(y_min));
+ {
+ Label overflow_ok;
+ __ SmiAdd(r9, rcx, rdx, &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ cmpq(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiAdd(rcx, rcx, rdx, &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ cmpq(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ __ movq(rcx, r11);
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiAddConstant(r9, rcx, Smi::FromInt(y_min), &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ cmpq(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_min), &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ cmpq(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ __ Move(rdx, Smi::FromInt(y_max));
+
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiAdd(r9, rcx, rdx, &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ cmpq(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiAdd(rcx, rcx, rdx, &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ cmpq(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ __ movq(rcx, r11);
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiAddConstant(r9, rcx, Smi::FromInt(y_max), &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ cmpq(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_max), &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ cmpq(rcx, r11);
+ __ j(not_equal, exit);
+ }
+}
+
+
TEST(SmiAdd) {
v8::internal::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -829,6 +935,14 @@ TEST(SmiAdd) {
SmiAddTest(masm, &exit, 0x70, Smi::kMaxValue, -5);
SmiAddTest(masm, &exit, 0x80, Smi::kMaxValue, Smi::kMinValue);
+ SmiAddOverflowTest(masm, &exit, 0x90, -1);
+ SmiAddOverflowTest(masm, &exit, 0xA0, 1);
+ SmiAddOverflowTest(masm, &exit, 0xB0, 1024);
+ SmiAddOverflowTest(masm, &exit, 0xC0, Smi::kMaxValue);
+ SmiAddOverflowTest(masm, &exit, 0xD0, -2);
+ SmiAddOverflowTest(masm, &exit, 0xE0, -42000);
+ SmiAddOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
+
__ xor_(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
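
For each x, SmiAddOverflowTest aims just past both ends of the Smi range: with x positive, y_min = kMaxValue - x + 1 is the smallest y whose sum exceeds kMaxValue, and y_max = kMaxValue is the largest tried; with x negative, y_min = kMinValue gives the deepest undershoot and y_max = kMinValue - x - 1 the shallowest. A sketch verifying that arithmetic with 64-bit sums, showing why every SmiAdd in the test must branch to its overflow label:

// Sketch: both boundary sums land outside the Smi range.
static void CheckOverflowBounds(int x) {
  ASSERT(x != 0);  // adding a Smi to 0 can never overflow
  int y_max = (x > 0) ? Smi::kMaxValue : (Smi::kMinValue - x - 1);
  int y_min = (x > 0) ? (Smi::kMaxValue - x + 1) : Smi::kMinValue;
  int64_t lo = static_cast<int64_t>(x) + y_min;
  int64_t hi = static_cast<int64_t>(x) + y_max;
  CHECK(lo > Smi::kMaxValue || lo < Smi::kMinValue);
  CHECK(hi > Smi::kMaxValue || hi < Smi::kMinValue);
}
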
@@ -886,6 +1000,7 @@ static void SmiSubTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
+
static void SmiSubOverflowTest(MacroAssembler* masm,
Label* exit,
int id,
@@ -1001,7 +1116,7 @@ TEST(SmiSub) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -1092,7 +1207,7 @@ TEST(SmiMul) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -1199,7 +1314,7 @@ TEST(SmiDiv) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -1310,7 +1425,7 @@ TEST(SmiMod) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -1408,7 +1523,7 @@ TEST(SmiIndex) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -1478,7 +1593,7 @@ TEST(SmiSelectNonSmi) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -1558,7 +1673,7 @@ TEST(SmiAnd) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -1640,7 +1755,7 @@ TEST(SmiOr) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -1724,7 +1839,7 @@ TEST(SmiXor) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -1792,7 +1907,7 @@ TEST(SmiNot) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -1889,7 +2004,7 @@ TEST(SmiShiftLeft) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -1996,7 +2111,7 @@ TEST(SmiShiftLogicalRight) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -2066,7 +2181,7 @@ TEST(SmiShiftArithmeticRight) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -2131,7 +2246,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -2175,7 +2290,7 @@ TEST(OperandOffset) {
&actual_size,
true));
CHECK(buffer);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
@@ -2520,5 +2635,114 @@ TEST(OperandOffset) {
}
+TEST(LoadAndStoreWithRepresentation) {
+ v8::internal::V8::Initialize(NULL);
+
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
+ masm->set_allow_stub_calls(false);
+ EntryCode(masm);
+ __ subq(rsp, Immediate(1 * kPointerSize));
+ Label exit;
+
+ // Test 1.
+ __ movq(rax, Immediate(1)); // Test number.
+ __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(rcx, Immediate(-1));
+ __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Byte());
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ movl(rdx, Immediate(255));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+ __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Byte());
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ // Test 2.
+ __ movq(rax, Immediate(2)); // Test number.
+ __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ Set(rcx, V8_2PART_UINT64_C(0xdeadbeaf, 12345678));
+ __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Smi());
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Set(rdx, V8_2PART_UINT64_C(0xdeadbeaf, 12345678));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+ __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Smi());
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ // Test 3.
+ __ movq(rax, Immediate(3)); // Test number.
+ __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movq(rcx, Immediate(-1));
+ __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Integer32());
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ movl(rdx, Immediate(-1));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+ __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Integer32());
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ // Test 4.
+ __ movq(rax, Immediate(4)); // Test number.
+ __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ movl(rcx, Immediate(0x44332211));
+ __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::HeapObject());
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ movl(rdx, Immediate(0x44332211));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+ __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::HeapObject());
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ // Test 5.
+ __ movq(rax, Immediate(5)); // Test number.
+ __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ Set(rcx, V8_2PART_UINT64_C(0x12345678, deadbeaf));
+ __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::Tagged());
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Set(rdx, V8_2PART_UINT64_C(0x12345678, deadbeaf));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+ __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::Tagged());
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ // Test 6.
+ __ movq(rax, Immediate(6)); // Test number.
+ __ movq(Operand(rsp, 0 * kPointerSize), Immediate(0));
+ __ Set(rcx, V8_2PART_UINT64_C(0x11223344, 55667788));
+ __ Store(Operand(rsp, 0 * kPointerSize), rcx, Representation::External());
+ __ movq(rcx, Operand(rsp, 0 * kPointerSize));
+ __ Set(rdx, V8_2PART_UINT64_C(0x11223344, 55667788));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+ __ Load(rdx, Operand(rsp, 0 * kPointerSize), Representation::External());
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ addq(rsp, Immediate(1 * kPointerSize));
+ ExitCode(masm);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
#undef __
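
LoadAndStoreWithRepresentation checks that the representation-aware Store/Load helpers move exactly the width a value's representation implies: Byte() touches 8 bits (storing -1 into a zeroed slot reads back as 255), Integer32() moves 32 bits, and Smi(), HeapObject(), Tagged() and External() move full pointer-sized words. A sketch of the lowering the Byte() case appears to imply on x64; the exact instruction selection inside Load/Store is an assumption here:

// Sketch (assumed lowering): Representation::Byte() on x64.
__ movb(Operand(rsp, 0), rcx);     // Store: write only the low 8 bits
__ movzxbq(rdx, Operand(rsp, 0));  // Load: zero-extend those 8 bits
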
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 33d9230e01..e62bdeb074 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -73,86 +73,63 @@ TEST(MarkingDeque) {
TEST(Promotion) {
- // This test requires compaction. If compaction is turned off, we
- // skip the entire test.
- if (FLAG_never_compact) return;
-
- // Ensure that we get a compacting collection so that objects are promoted
- // from new space.
- FLAG_gc_global = true;
- FLAG_always_compact = true;
- HEAP->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
-
CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+ heap->ConfigureHeap(2*256*KB, 1*MB, 1*MB);
v8::HandleScope sc(CcTest::isolate());
// Allocate a fixed array in the new space.
- int array_size =
+ int array_length =
(Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
- (kPointerSize * 4);
- Object* obj = HEAP->AllocateFixedArray(array_size)->ToObjectChecked();
-
+ (4 * kPointerSize);
+ Object* obj = heap->AllocateFixedArray(array_length)->ToObjectChecked();
Handle<FixedArray> array(FixedArray::cast(obj));
// Array should be in the new space.
- CHECK(HEAP->InSpace(*array, NEW_SPACE));
+ CHECK(heap->InSpace(*array, NEW_SPACE));
- // Call the m-c collector, so array becomes an old object.
- HEAP->CollectGarbage(OLD_POINTER_SPACE);
+ // Call mark compact GC, so array becomes an old object.
+ heap->CollectGarbage(OLD_POINTER_SPACE);
// Array now sits in the old space
- CHECK(HEAP->InSpace(*array, OLD_POINTER_SPACE));
+ CHECK(heap->InSpace(*array, OLD_POINTER_SPACE));
}
TEST(NoPromotion) {
- HEAP->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
-
- // Test the situation that some objects in new space are promoted to
- // the old space
CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+ heap->ConfigureHeap(2*256*KB, 1*MB, 1*MB);
v8::HandleScope sc(CcTest::isolate());
- // Do a mark compact GC to shrink the heap.
- HEAP->CollectGarbage(OLD_POINTER_SPACE);
-
- // Allocate a big Fixed array in the new space.
- int length = (Page::kMaxNonCodeHeapObjectSize -
- FixedArray::kHeaderSize) / (2 * kPointerSize);
- Object* obj = i::Isolate::Current()->heap()->AllocateFixedArray(length)->
- ToObjectChecked();
-
+ // Allocate a big fixed array in the new space.
+ int array_length =
+ (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
+ (2 * kPointerSize);
+ Object* obj = heap->AllocateFixedArray(array_length)->ToObjectChecked();
Handle<FixedArray> array(FixedArray::cast(obj));
- // Array still stays in the new space.
- CHECK(HEAP->InSpace(*array, NEW_SPACE));
-
- // Allocate objects in the old space until out of memory.
- FixedArray* host = *array;
- while (true) {
- Object* obj;
- { MaybeObject* maybe_obj = HEAP->AllocateFixedArray(100, TENURED);
- if (!maybe_obj->ToObject(&obj)) break;
- }
+ // Array should be in the new space.
+ CHECK(heap->InSpace(*array, NEW_SPACE));
- host->set(0, obj);
- host = FixedArray::cast(obj);
- }
+ // Simulate a full old space to make promotion fail.
+ SimulateFullSpace(heap->old_pointer_space());
// Call mark compact GC, and it should pass.
- HEAP->CollectGarbage(OLD_POINTER_SPACE);
+ heap->CollectGarbage(OLD_POINTER_SPACE);
}
TEST(MarkCompactCollector) {
FLAG_incremental_marking = false;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
v8::HandleScope sc(CcTest::isolate());
+ Handle<GlobalObject> global(isolate->context()->global_object());
// call mark-compact when heap is empty
heap->CollectGarbage(OLD_POINTER_SPACE, "trigger 1");
@@ -191,8 +168,8 @@ TEST(MarkCompactCollector) {
Map::cast(heap->AllocateMap(JS_OBJECT_TYPE,
JSObject::kHeaderSize)->ToObjectChecked());
function->set_initial_map(initial_map);
- isolate->context()->global_object()->SetProperty(
- func_name, function, NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetProperty(
+ global, handle(func_name), handle(function), NONE, kNonStrictMode);
JSObject* obj = JSObject::cast(
heap->AllocateJSObject(function)->ToObjectChecked());
@@ -200,7 +177,7 @@ TEST(MarkCompactCollector) {
func_name = String::cast(
heap->InternalizeUtf8String("theFunction")->ToObjectChecked());
- CHECK(isolate->context()->global_object()->HasLocalProperty(func_name));
+ CHECK(JSReceiver::HasLocalProperty(global, handle(func_name)));
Object* func_value = isolate->context()->global_object()->
GetProperty(func_name)->ToObjectChecked();
CHECK(func_value->IsJSFunction());
@@ -209,20 +186,19 @@ TEST(MarkCompactCollector) {
obj = JSObject::cast(heap->AllocateJSObject(function)->ToObjectChecked());
String* obj_name =
String::cast(heap->InternalizeUtf8String("theObject")->ToObjectChecked());
- isolate->context()->global_object()->SetProperty(
- obj_name, obj, NONE, kNonStrictMode)->ToObjectChecked();
+ JSReceiver::SetProperty(
+ global, handle(obj_name), handle(obj), NONE, kNonStrictMode);
String* prop_name =
String::cast(heap->InternalizeUtf8String("theSlot")->ToObjectChecked());
- obj->SetProperty(prop_name,
- Smi::FromInt(23),
- NONE,
- kNonStrictMode)->ToObjectChecked();
+ Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
+ JSReceiver::SetProperty(
+ handle(obj), handle(prop_name), twenty_three, NONE, kNonStrictMode);
heap->CollectGarbage(OLD_POINTER_SPACE, "trigger 5");
obj_name =
String::cast(heap->InternalizeUtf8String("theObject")->ToObjectChecked());
- CHECK(isolate->context()->global_object()->HasLocalProperty(obj_name));
+ CHECK(JSReceiver::HasLocalProperty(global, handle(obj_name)));
CHECK(isolate->context()->global_object()->
GetProperty(obj_name)->ToObjectChecked()->IsJSObject());
obj = JSObject::cast(isolate->context()->global_object()->
@@ -243,7 +219,7 @@ static Handle<Map> CreateMap(Isolate* isolate) {
TEST(MapCompact) {
FLAG_max_map_space_pages = 16;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
{
@@ -255,51 +231,18 @@ TEST(MapCompact) {
Handle<Map> map = CreateMap();
map->set_prototype(*root);
root = factory->NewJSObjectFromMap(map);
- } while (HEAP->map_space()->MapPointersEncodable());
+ } while (CcTest::heap()->map_space()->MapPointersEncodable());
}
// Now, as we don't have any handles to just allocated maps, we should
// be able to trigger map compaction.
// To give an additional chance to fail, try to force compaction which
// should be impossible right now.
- HEAP->CollectAllGarbage(Heap::kForceCompactionMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kForceCompactionMask);
// And now map pointers should be encodable again.
- CHECK(HEAP->map_space()->MapPointersEncodable());
+ CHECK(CcTest::heap()->map_space()->MapPointersEncodable());
}
#endif
-static int gc_starts = 0;
-static int gc_ends = 0;
-
-static void GCPrologueCallbackFunc() {
- CHECK(gc_starts == gc_ends);
- gc_starts++;
-}
-
-
-static void GCEpilogueCallbackFunc() {
- CHECK(gc_starts == gc_ends + 1);
- gc_ends++;
-}
-
-
-TEST(GCCallback) {
- i::FLAG_stress_compaction = false;
- CcTest::InitializeVM();
-
- HEAP->SetGlobalGCPrologueCallback(&GCPrologueCallbackFunc);
- HEAP->SetGlobalGCEpilogueCallback(&GCEpilogueCallbackFunc);
-
- // Scavenge does not call GC callback functions.
- HEAP->PerformScavenge();
-
- CHECK_EQ(0, gc_starts);
- CHECK_EQ(gc_ends, gc_starts);
-
- HEAP->CollectGarbage(OLD_POINTER_SPACE);
- CHECK_EQ(1, gc_starts);
- CHECK_EQ(gc_ends, gc_starts);
-}
-
static int NumberOfWeakCalls = 0;
static void WeakPointerCallback(v8::Isolate* isolate,
@@ -314,17 +257,17 @@ static void WeakPointerCallback(v8::Isolate* isolate,
TEST(ObjectGroups) {
FLAG_incremental_marking = false;
CcTest::InitializeVM();
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
-
+ GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
+ Heap* heap = CcTest::heap();
NumberOfWeakCalls = 0;
v8::HandleScope handle_scope(CcTest::isolate());
Handle<Object> g1s1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g1s2 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g1c1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
global_handles->MakeWeak(g1s1.location(),
reinterpret_cast<void*>(1234),
&WeakPointerCallback);
@@ -336,11 +279,11 @@ TEST(ObjectGroups) {
&WeakPointerCallback);
Handle<Object> g2s1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2s2 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
Handle<Object> g2c1 =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->Create(heap->AllocateFixedArray(1)->ToObjectChecked());
global_handles->MakeWeak(g2s1.location(),
reinterpret_cast<void*>(1234),
&WeakPointerCallback);
@@ -370,7 +313,7 @@ TEST(ObjectGroups) {
Handle<HeapObject>::cast(g2s1).location(), g2_children, 1);
}
// Do a full GC
- HEAP->CollectGarbage(OLD_POINTER_SPACE);
+ heap->CollectGarbage(OLD_POINTER_SPACE);
// All object should be alive.
CHECK_EQ(0, NumberOfWeakCalls);
@@ -398,7 +341,7 @@ TEST(ObjectGroups) {
Handle<HeapObject>::cast(g2s1).location(), g2_children, 1);
}
- HEAP->CollectGarbage(OLD_POINTER_SPACE);
+ heap->CollectGarbage(OLD_POINTER_SPACE);
// All objects should be gone. 5 global handles in total.
CHECK_EQ(5, NumberOfWeakCalls);
@@ -411,7 +354,7 @@ TEST(ObjectGroups) {
reinterpret_cast<void*>(1234),
&WeakPointerCallback);
- HEAP->CollectGarbage(OLD_POINTER_SPACE);
+ heap->CollectGarbage(OLD_POINTER_SPACE);
CHECK_EQ(7, NumberOfWeakCalls);
}
@@ -442,12 +385,12 @@ class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
TEST(EmptyObjectGroups) {
CcTest::InitializeVM();
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
v8::HandleScope handle_scope(CcTest::isolate());
- Handle<Object> object =
- global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ Handle<Object> object = global_handles->Create(
+ CcTest::heap()->AllocateFixedArray(1)->ToObjectChecked());
TestRetainedObjectInfo info;
global_handles->AddObjectGroup(NULL, 0, &info);
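
Two themes run through this file. The Promotion/NoPromotion pair now configures a small heap through the harness and, in NoPromotion, fills the old space with the SimulateFullSpace helper instead of allocating until failure. The MarkCompactCollector test moves property writes from the raw-pointer SetProperty form to the static, handle-based JSReceiver::SetProperty, which stays valid even if the write itself triggers a GC. A sketch of the handlified convention; the helper name SetSlot is made up for illustration:

// Sketch: handlified property write. Raw Object* pointers can be moved
// by a GC triggered inside SetProperty; handles are updated with them.
static void SetSlot(Isolate* isolate, Handle<JSObject> obj,
                    const char* name, int value) {
  Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
  Handle<Smi> smi_value(Smi::FromInt(value), isolate);
  JSReceiver::SetProperty(obj, key, smi_value, NONE, kNonStrictMode);
}
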
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index b129ff3af4..b4488a603a 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -58,7 +58,7 @@ class HarmonyIsolate {
TEST(PerIsolateState) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context1;
+ LocalContext context1(isolate.GetIsolate());
CompileRun(
"var count = 0;"
"var calls = 0;"
@@ -71,20 +71,20 @@ TEST(PerIsolateState) {
"(function() { obj.foo = 'bar'; })");
Handle<Value> notify_fun2;
{
- LocalContext context2;
+ LocalContext context2(isolate.GetIsolate());
context2->Global()->Set(String::New("obj"), obj);
notify_fun2 = CompileRun(
"(function() { obj.foo = 'baz'; })");
}
Handle<Value> notify_fun3;
{
- LocalContext context3;
+ LocalContext context3(isolate.GetIsolate());
context3->Global()->Set(String::New("obj"), obj);
notify_fun3 = CompileRun(
"(function() { obj.foo = 'bat'; })");
}
{
- LocalContext context4;
+ LocalContext context4(isolate.GetIsolate());
context4->Global()->Set(String::New("observer"), observer);
context4->Global()->Set(String::New("fun1"), notify_fun1);
context4->Global()->Set(String::New("fun2"), notify_fun2);
@@ -99,7 +99,7 @@ TEST(PerIsolateState) {
TEST(EndOfMicrotaskDelivery) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
CompileRun(
"var obj = {};"
"var count = 0;"
@@ -113,7 +113,7 @@ TEST(EndOfMicrotaskDelivery) {
TEST(DeliveryOrdering) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
CompileRun(
"var obj1 = {};"
"var obj2 = {};"
@@ -145,7 +145,7 @@ TEST(DeliveryOrdering) {
TEST(DeliveryOrderingReentrant) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
CompileRun(
"var obj = {};"
"var reentered = false;"
@@ -177,7 +177,7 @@ TEST(DeliveryOrderingReentrant) {
TEST(DeliveryOrderingDeliverChangeRecords) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
CompileRun(
"var obj = {};"
"var ordering = [];"
@@ -203,14 +203,14 @@ TEST(ObjectHashTableGrowth) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
// Initializing this context sets up initial hash tables.
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
Handle<Value> obj = CompileRun("obj = {};");
Handle<Value> observer = CompileRun(
"var ran = false;"
"(function() { ran = true })");
{
// As does initializing this context.
- LocalContext context2;
+ LocalContext context2(isolate.GetIsolate());
context2->Global()->Set(String::New("obj"), obj);
context2->Global()->Set(String::New("observer"), observer);
CompileRun(
@@ -231,7 +231,7 @@ TEST(ObjectHashTableGrowth) {
TEST(GlobalObjectObservation) {
HarmonyIsolate isolate;
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
HandleScope scope(isolate.GetIsolate());
Handle<Object> global_proxy = context->Global();
Handle<Object> inner_global = global_proxy->GetPrototype().As<Object>();
@@ -263,7 +263,7 @@ TEST(GlobalObjectObservation) {
// to the old context.
context->DetachGlobal();
{
- LocalContext context2;
+ LocalContext context2(isolate.GetIsolate());
context2->DetachGlobal();
context2->ReattachGlobal(global_proxy);
CompileRun(
@@ -278,7 +278,8 @@ TEST(GlobalObjectObservation) {
// Attaching by passing to Context::New
{
// Delegates to Context::New
- LocalContext context3(NULL, Handle<ObjectTemplate>(), global_proxy);
+ LocalContext context3(
+ isolate.GetIsolate(), NULL, Handle<ObjectTemplate>(), global_proxy);
CompileRun(
"var records3 = [];"
"Object.observe(this, function(r) { [].push.apply(records3, r) });"
@@ -330,7 +331,7 @@ static void ExpectRecords(Handle<Value> records,
TEST(APITestBasicMutation) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
Handle<Object> obj = Handle<Object>::Cast(CompileRun(
"var records = [];"
"var obj = {};"
@@ -374,7 +375,7 @@ TEST(APITestBasicMutation) {
TEST(HiddenPrototypeObservation) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
Handle<FunctionTemplate> tmpl = FunctionTemplate::New();
tmpl->SetHiddenPrototype(true);
tmpl->InstanceTemplate()->Set(String::New("foo"), Number::New(75));
@@ -393,7 +394,7 @@ TEST(HiddenPrototypeObservation) {
{ obj, "updated", "foo", Number::New(75) }
};
EXPECT_RECORDS(CompileRun("records"), expected_records);
- obj->SetPrototype(Null());
+ obj->SetPrototype(Null(isolate.GetIsolate()));
CompileRun("obj.foo = 43");
const RecordExpectation expected_records2[] = {
{ obj, "new", "foo", Handle<Value>() }
@@ -423,14 +424,15 @@ static int NumberOfElements(i::Handle<i::JSWeakMap> map) {
TEST(ObservationWeakMap) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
CompileRun(
"var obj = {};"
"Object.observe(obj, function(){});"
"Object.getNotifier(obj);"
"obj = null;");
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate.GetIsolate());
i::Handle<i::JSObject> observation_state =
- i::Isolate::Current()->factory()->observation_state();
+ i_isolate->factory()->observation_state();
i::Handle<i::JSWeakMap> callbackInfoMap =
i::Handle<i::JSWeakMap>::cast(
i::GetProperty(observation_state, "callbackInfoMap"));
@@ -443,7 +445,7 @@ TEST(ObservationWeakMap) {
CHECK_EQ(1, NumberOfElements(callbackInfoMap));
CHECK_EQ(1, NumberOfElements(objectInfoMap));
CHECK_EQ(1, NumberOfElements(notifierObjectInfoMap));
- HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ i_isolate->heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(0, NumberOfElements(callbackInfoMap));
CHECK_EQ(0, NumberOfElements(objectInfoMap));
CHECK_EQ(0, NumberOfElements(notifierObjectInfoMap));
@@ -463,50 +465,54 @@ static bool IndexedAccessAlwaysAllowed(Local<Object>, uint32_t, AccessType,
static AccessType g_access_block_type = ACCESS_GET;
+static const uint32_t kBlockedContextIndex = 1337;
static bool NamedAccessAllowUnlessBlocked(Local<Object> host,
- Local<Value> key,
- AccessType type,
- Local<Value>) {
+ Local<Value> key,
+ AccessType type,
+ Local<Value> data) {
if (type != g_access_block_type) return true;
- Handle<Object> global = Context::GetCurrent()->Global();
- Handle<Value> blacklist = global->Get(String::New("blacklist"));
- if (!blacklist->IsObject()) return true;
- if (key->IsString()) return !blacklist.As<Object>()->Has(key);
- return true;
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(
+ Utils::OpenHandle(*host)->GetIsolate());
+ Handle<Object> global = isolate->GetCurrentContext()->Global();
+ if (!global->Has(kBlockedContextIndex)) return true;
+ return !key->IsString() || !key->Equals(data);
}
static bool IndexedAccessAllowUnlessBlocked(Local<Object> host,
- uint32_t index,
- AccessType type,
- Local<Value>) {
- if (type != ACCESS_GET) return true;
- Handle<Object> global = Context::GetCurrent()->Global();
- Handle<Value> blacklist = global->Get(String::New("blacklist"));
- if (!blacklist->IsObject()) return true;
- return !blacklist.As<Object>()->Has(index);
+ uint32_t index,
+ AccessType type,
+ Local<Value> data) {
+ if (type != g_access_block_type) return true;
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(
+ Utils::OpenHandle(*host)->GetIsolate());
+ Handle<Object> global = isolate->GetCurrentContext()->Global();
+ if (!global->Has(kBlockedContextIndex)) return true;
+ return index != data->Uint32Value();
}
static bool BlockAccessKeys(Local<Object> host, Local<Value> key,
AccessType type, Local<Value>) {
- Handle<Object> global = Context::GetCurrent()->Global();
- Handle<Value> blacklist = global->Get(String::New("blacklist"));
- if (!blacklist->IsObject()) return true;
- return type != ACCESS_KEYS ||
- !blacklist.As<Object>()->Has(String::New("__block_access_keys"));
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(
+ Utils::OpenHandle(*host)->GetIsolate());
+ Handle<Object> global = isolate->GetCurrentContext()->Global();
+ return type != ACCESS_KEYS || !global->Has(kBlockedContextIndex);
}
static Handle<Object> CreateAccessCheckedObject(
NamedSecurityCallback namedCallback,
- IndexedSecurityCallback indexedCallback) {
+ IndexedSecurityCallback indexedCallback,
+ Handle<Value> data = Handle<Value>()) {
Handle<ObjectTemplate> tmpl = ObjectTemplate::New();
- tmpl->SetAccessCheckCallbacks(namedCallback, indexedCallback);
+ tmpl->SetAccessCheckCallbacks(namedCallback, indexedCallback, data);
Handle<Object> instance = tmpl->NewInstance();
- instance->CreationContext()->Global()->Set(String::New("obj"), instance);
+ Handle<Object> global = instance->CreationContext()->Global();
+ global->Set(String::New("obj"), instance);
+ global->Set(kBlockedContextIndex, v8::True());
return instance;
}
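
The rewritten access checks no longer consult a script-visible blacklist object, which required running JS inside the check itself. Instead, the blocked key travels as the data argument of SetAccessCheckCallbacks, and a context opts into blocking by carrying the magic kBlockedContextIndex element on its global. A usage sketch showing the wiring; the same data handle passed at template-creation time is what later arrives in each callback:

// Sketch: wiring the blocked key through the template.
Handle<Object> MakeCheckedObject() {
  Handle<ObjectTemplate> tmpl = ObjectTemplate::New();
  tmpl->SetAccessCheckCallbacks(NamedAccessAllowUnlessBlocked,
                                IndexedAccessAlwaysAllowed,
                                String::New("foo"));  // becomes 'data'
  Handle<Object> instance = tmpl->NewInstance();
  Handle<Object> global = instance->CreationContext()->Global();
  global->Set(String::New("obj"), instance);
  global->Set(kBlockedContextIndex, v8::True());  // this context blocks
  return instance;
}
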
@@ -516,19 +522,20 @@ TEST(NamedAccessCheck) {
const AccessType types[] = { ACCESS_GET, ACCESS_HAS };
for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
g_access_block_type = types[i];
Handle<Object> instance = CreateAccessCheckedObject(
- NamedAccessAllowUnlessBlocked, IndexedAccessAlwaysAllowed);
+ NamedAccessAllowUnlessBlocked,
+ IndexedAccessAlwaysAllowed,
+ String::New("foo"));
CompileRun("var records = null;"
"var objNoCheck = {};"
- "var blacklist = {foo: true};"
"var observer = function(r) { records = r };"
"Object.observe(obj, observer);"
"Object.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2;
+ LocalContext context2(isolate.GetIsolate());
context2->Global()->Set(String::New("obj"), instance);
context2->Global()->Set(String::New("objNoCheck"), obj_no_check);
CompileRun("var records2 = null;"
@@ -563,19 +570,19 @@ TEST(IndexedAccessCheck) {
const AccessType types[] = { ACCESS_GET, ACCESS_HAS };
for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
g_access_block_type = types[i];
Handle<Object> instance = CreateAccessCheckedObject(
- NamedAccessAlwaysAllowed, IndexedAccessAllowUnlessBlocked);
+ NamedAccessAlwaysAllowed, IndexedAccessAllowUnlessBlocked,
+ Number::New(7));
CompileRun("var records = null;"
"var objNoCheck = {};"
- "var blacklist = {7: true};"
"var observer = function(r) { records = r };"
"Object.observe(obj, observer);"
"Object.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2;
+ LocalContext context2(isolate.GetIsolate());
context2->Global()->Set(String::New("obj"), instance);
context2->Global()->Set(String::New("objNoCheck"), obj_no_check);
CompileRun("var records2 = null;"
@@ -608,21 +615,21 @@ TEST(IndexedAccessCheck) {
TEST(SpliceAccessCheck) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
g_access_block_type = ACCESS_GET;
Handle<Object> instance = CreateAccessCheckedObject(
- NamedAccessAlwaysAllowed, IndexedAccessAllowUnlessBlocked);
+ NamedAccessAlwaysAllowed, IndexedAccessAllowUnlessBlocked,
+ Number::New(1));
CompileRun("var records = null;"
"obj[1] = 'foo';"
"obj.length = 2;"
"var objNoCheck = {1: 'bar', length: 2};"
- "var blacklist = {1: true};"
"observer = function(r) { records = r };"
"Array.observe(obj, observer);"
"Array.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2;
+ LocalContext context2(isolate.GetIsolate());
context2->Global()->Set(String::New("obj"), instance);
context2->Global()->Set(String::New("objNoCheck"), obj_no_check);
CompileRun("var records2 = null;"
@@ -653,18 +660,17 @@ TEST(SpliceAccessCheck) {
TEST(DisallowAllForAccessKeys) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
Handle<Object> instance = CreateAccessCheckedObject(
BlockAccessKeys, IndexedAccessAlwaysAllowed);
CompileRun("var records = null;"
"var objNoCheck = {};"
"var observer = function(r) { records = r };"
- "var blacklist = {__block_access_keys: true};"
"Object.observe(obj, observer);"
"Object.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2;
+ LocalContext context2(isolate.GetIsolate());
context2->Global()->Set(String::New("obj"), instance);
context2->Global()->Set(String::New("objNoCheck"), obj_no_check);
CompileRun("var records2 = null;"
@@ -691,15 +697,14 @@ TEST(DisallowAllForAccessKeys) {
TEST(AccessCheckDisallowApiModifications) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ LocalContext context(isolate.GetIsolate());
Handle<Object> instance = CreateAccessCheckedObject(
BlockAccessKeys, IndexedAccessAlwaysAllowed);
CompileRun("var records = null;"
"var observer = function(r) { records = r };"
- "var blacklist = {__block_access_keys: true};"
"Object.observe(obj, observer);");
{
- LocalContext context2;
+ LocalContext context2(isolate.GetIsolate());
context2->Global()->Set(String::New("obj"), instance);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
@@ -715,3 +720,18 @@ TEST(AccessCheckDisallowApiModifications) {
}
CHECK(CompileRun("records")->IsNull());
}
+
+
+TEST(HiddenPropertiesLeakage) {
+ HarmonyIsolate isolate;
+ HandleScope scope(isolate.GetIsolate());
+ LocalContext context(isolate.GetIsolate());
+ CompileRun("var obj = {};"
+ "var records = null;"
+ "var observer = function(r) { records = r };"
+ "Object.observe(obj, observer);");
+ Handle<Value> obj = context->Global()->Get(String::New("obj"));
+ Handle<Object>::Cast(obj)->SetHiddenValue(String::New("foo"), Null());
+ CompileRun(""); // trigger delivery
+ CHECK(CompileRun("records")->IsNull());
+}
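
The new HiddenPropertiesLeakage test pins down that API-side hidden values
never surface as observation records. The API pairing it exercises, reduced to
a sketch (assuming the v8.h of this release):

    v8::Handle<v8::Object> obj = v8::Object::New();
    obj->SetHiddenValue(v8::String::New("foo"), v8::Null());
    // An Object.observe() observer registered on |obj| must see no record
    // for this write: hidden values are invisible to script.
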
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 80b276c8f9..952cb68cec 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -107,6 +107,7 @@ TEST(ScanKeywords) {
TEST(ScanHTMLEndComments) {
v8::V8::Initialize();
+ v8::Isolate* isolate = CcTest::isolate();
// Regression test. See:
// http://code.google.com/p/chromium/issues/detail?id=53548
@@ -139,19 +140,19 @@ TEST(ScanHTMLEndComments) {
// Parser/Scanner needs a stack limit.
int marker;
- i::Isolate::Current()->stack_guard()->SetStackLimit(
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
for (int i = 0; tests[i]; i++) {
v8::ScriptData* data =
- v8::ScriptData::PreCompile(tests[i], i::StrLength(tests[i]));
+ v8::ScriptData::PreCompile(isolate, tests[i], i::StrLength(tests[i]));
CHECK(data != NULL && !data->HasError());
delete data;
}
for (int i = 0; fail_tests[i]; i++) {
- v8::ScriptData* data =
- v8::ScriptData::PreCompile(fail_tests[i], i::StrLength(fail_tests[i]));
+ v8::ScriptData* data = v8::ScriptData::PreCompile(
+ isolate, fail_tests[i], i::StrLength(fail_tests[i]));
CHECK(data == NULL || data->HasError());
delete data;
}
@@ -173,12 +174,12 @@ class ScriptResource : public v8::String::ExternalAsciiStringResource {
TEST(Preparsing) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handles(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
int marker;
- i::Isolate::Current()->stack_guard()->SetStackLimit(
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
// Source containing functions that might be lazily compiled and all types
@@ -199,7 +200,7 @@ TEST(Preparsing) {
int error_source_length = i::StrLength(error_source);
v8::ScriptData* preparse =
- v8::ScriptData::PreCompile(source, source_length);
+ v8::ScriptData::PreCompile(isolate, source, source_length);
CHECK(!preparse->HasError());
bool lazy_flag = i::FLAG_lazy;
{
@@ -221,7 +222,7 @@ TEST(Preparsing) {
// Syntax error.
v8::ScriptData* error_preparse =
- v8::ScriptData::PreCompile(error_source, error_source_length);
+ v8::ScriptData::PreCompile(isolate, error_source, error_source_length);
CHECK(error_preparse->HasError());
i::ScriptDataImpl *pre_impl =
reinterpret_cast<i::ScriptDataImpl*>(error_preparse);
@@ -241,7 +242,7 @@ TEST(StandAlonePreParser) {
v8::V8::Initialize();
int marker;
- i::Isolate::Current()->stack_guard()->SetStackLimit(
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
const char* programs[] = {
@@ -253,22 +254,21 @@ TEST(StandAlonePreParser) {
NULL
};
- uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
+ uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
const char* program = programs[i];
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const i::byte*>(program),
static_cast<unsigned>(strlen(program)));
i::CompleteParserRecorder log;
- i::Scanner scanner(i::Isolate::Current()->unicode_cache());
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- v8::preparser::PreParser preparser(&scanner, &log, stack_limit);
+ i::PreParser preparser(&scanner, &log, stack_limit);
preparser.set_allow_lazy(true);
preparser.set_allow_natives_syntax(true);
- v8::preparser::PreParser::PreParseResult result =
- preparser.PreParseProgram();
- CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
+ i::PreParser::PreParseResult result = preparser.PreParseProgram();
+ CHECK_EQ(i::PreParser::kPreParseSuccess, result);
i::ScriptDataImpl data(log.ExtractData());
CHECK(!data.has_error());
}
@@ -279,7 +279,7 @@ TEST(StandAlonePreParserNoNatives) {
v8::V8::Initialize();
int marker;
- i::Isolate::Current()->stack_guard()->SetStackLimit(
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
const char* programs[] = {
@@ -288,22 +288,21 @@ TEST(StandAlonePreParserNoNatives) {
NULL
};
- uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
+ uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
const char* program = programs[i];
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const i::byte*>(program),
static_cast<unsigned>(strlen(program)));
i::CompleteParserRecorder log;
- i::Scanner scanner(i::Isolate::Current()->unicode_cache());
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
// Preparser defaults to disallowing natives syntax.
- v8::preparser::PreParser preparser(&scanner, &log, stack_limit);
+ i::PreParser preparser(&scanner, &log, stack_limit);
preparser.set_allow_lazy(true);
- v8::preparser::PreParser::PreParseResult result =
- preparser.PreParseProgram();
- CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
+ i::PreParser::PreParseResult result = preparser.PreParseProgram();
+ CHECK_EQ(i::PreParser::kPreParseSuccess, result);
i::ScriptDataImpl data(log.ExtractData());
// Data contains syntax error.
CHECK(data.has_error());
@@ -313,7 +312,7 @@ TEST(StandAlonePreParserNoNatives) {
TEST(RegressChromium62639) {
v8::V8::Initialize();
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
int marker;
isolate->stack_guard()->SetStackLimit(
@@ -337,7 +336,7 @@ TEST(RegressChromium62639) {
TEST(Regress928) {
v8::V8::Initialize();
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
// Preparsing didn't consider the catch clause of a try statement
@@ -352,7 +351,7 @@ TEST(Regress928) {
"try { } catch (e) { var foo = function () { /* first */ } }"
"var bar = function () { /* second */ }";
- v8::HandleScope handles(v8::Isolate::GetCurrent());
+ v8::HandleScope handles(CcTest::isolate());
i::Handle<i::String> source(
factory->NewStringFromAscii(i::CStrVector(program)));
i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
@@ -384,7 +383,7 @@ TEST(PreParseOverflow) {
v8::V8::Initialize();
int marker;
- i::Isolate::Current()->stack_guard()->SetStackLimit(
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
size_t kProgramSize = 1024 * 1024;
@@ -392,20 +391,19 @@ TEST(PreParseOverflow) {
memset(*program, '(', kProgramSize);
program[kProgramSize] = '\0';
- uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
+ uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const i::byte*>(*program),
static_cast<unsigned>(kProgramSize));
i::CompleteParserRecorder log;
- i::Scanner scanner(i::Isolate::Current()->unicode_cache());
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
- v8::preparser::PreParser preparser(&scanner, &log, stack_limit);
+ i::PreParser preparser(&scanner, &log, stack_limit);
preparser.set_allow_lazy(true);
- v8::preparser::PreParser::PreParseResult result =
- preparser.PreParseProgram();
- CHECK_EQ(v8::preparser::PreParser::kPreParseStackOverflow, result);
+ i::PreParser::PreParseResult result = preparser.PreParseProgram();
+ CHECK_EQ(i::PreParser::kPreParseStackOverflow, result);
}
@@ -437,7 +435,7 @@ void TestCharacterStream(const char* ascii_source,
unsigned end = 0) {
if (end == 0) end = length;
unsigned sub_length = end - start;
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
i::HandleScope test_scope(isolate);
i::SmartArrayPointer<i::uc16> uc16_buffer(new i::uc16[length]);
@@ -544,7 +542,7 @@ void TestCharacterStream(const char* ascii_source,
TEST(CharacterStreams) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handles(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
@@ -619,7 +617,7 @@ void TestStreamScanner(i::Utf16CharacterStream* stream,
i::Token::Value* expected_tokens,
int skip_pos = 0, // Zero means not skipping.
int skip_to = 0) {
- i::Scanner scanner(i::Isolate::Current()->unicode_cache());
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(stream);
int i = 0;
@@ -701,7 +699,7 @@ void TestScanRegExp(const char* re_source, const char* expected) {
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const i::byte*>(re_source),
static_cast<unsigned>(strlen(re_source)));
- i::Scanner scanner(i::Isolate::Current()->unicode_cache());
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
i::Token::Value start = scanner.peek();
@@ -990,11 +988,11 @@ TEST(ScopePositions) {
{ NULL, NULL, NULL, i::EVAL_SCOPE, i::CLASSIC_MODE }
};
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
- v8::HandleScope handles(v8::Isolate::GetCurrent());
- v8::Handle<v8::Context> context = v8::Context::New(v8::Isolate::GetCurrent());
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
v8::Context::Scope context_scope(context);
int marker;
@@ -1027,11 +1025,11 @@ TEST(ScopePositions) {
parser.set_allow_harmony_scoping(true);
info.MarkAsGlobal();
info.SetLanguageMode(source_data[i].language_mode);
- i::FunctionLiteral* function = parser.ParseProgram();
- CHECK(function != NULL);
+ parser.Parse();
+ CHECK(info.function() != NULL);
// Check scope types and positions.
- i::Scope* scope = function->scope();
+ i::Scope* scope = info.function()->scope();
CHECK(scope->is_global_scope());
CHECK_EQ(scope->start_position(), 0);
CHECK_EQ(scope->end_position(), kProgramSize);
@@ -1048,7 +1046,7 @@ TEST(ScopePositions) {
i::Handle<i::String> FormatMessage(i::ScriptDataImpl* data) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
const char* message = data->BuildMessage();
i::Handle<i::String> format = v8::Utils::OpenHandle(
@@ -1087,30 +1085,25 @@ enum ParserFlag {
kAllowModules,
kAllowGenerators,
kAllowForOf,
- kAllowHarmonyNumericLiterals,
- kParserFlagCount
+ kAllowHarmonyNumericLiterals
};
-static bool checkParserFlag(unsigned flags, ParserFlag flag) {
- return flags & (1 << flag);
+void SetParserFlags(i::ParserBase* parser, i::EnumSet<ParserFlag> flags) {
+ parser->set_allow_lazy(flags.Contains(kAllowLazy));
+ parser->set_allow_natives_syntax(flags.Contains(kAllowNativesSyntax));
+ parser->set_allow_harmony_scoping(flags.Contains(kAllowHarmonyScoping));
+ parser->set_allow_modules(flags.Contains(kAllowModules));
+ parser->set_allow_generators(flags.Contains(kAllowGenerators));
+ parser->set_allow_for_of(flags.Contains(kAllowForOf));
+ parser->set_allow_harmony_numeric_literals(
+ flags.Contains(kAllowHarmonyNumericLiterals));
}
-#define SET_PARSER_FLAGS(parser, flags) \
- parser.set_allow_lazy(checkParserFlag(flags, kAllowLazy)); \
- parser.set_allow_natives_syntax(checkParserFlag(flags, \
- kAllowNativesSyntax)); \
- parser.set_allow_harmony_scoping(checkParserFlag(flags, \
- kAllowHarmonyScoping)); \
- parser.set_allow_modules(checkParserFlag(flags, kAllowModules)); \
- parser.set_allow_generators(checkParserFlag(flags, kAllowGenerators)); \
- parser.set_allow_for_of(checkParserFlag(flags, kAllowForOf)); \
- parser.set_allow_harmony_numeric_literals( \
- checkParserFlag(flags, kAllowHarmonyNumericLiterals));
-
-void TestParserSyncWithFlags(i::Handle<i::String> source, unsigned flags) {
- i::Isolate* isolate = i::Isolate::Current();
+void TestParserSyncWithFlags(i::Handle<i::String> source,
+ i::EnumSet<ParserFlag> flags) {
+ i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
@@ -1120,12 +1113,11 @@ void TestParserSyncWithFlags(i::Handle<i::String> source, unsigned flags) {
{
i::Scanner scanner(isolate->unicode_cache());
i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
- v8::preparser::PreParser preparser(&scanner, &log, stack_limit);
- SET_PARSER_FLAGS(preparser, flags);
+ i::PreParser preparser(&scanner, &log, stack_limit);
+ SetParserFlags(&preparser, flags);
scanner.Initialize(&stream);
- v8::preparser::PreParser::PreParseResult result =
- preparser.PreParseProgram();
- CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
+ i::PreParser::PreParseResult result = preparser.PreParseProgram();
+ CHECK_EQ(i::PreParser::kPreParseSuccess, result);
}
i::ScriptDataImpl data(log.ExtractData());
@@ -1135,9 +1127,10 @@ void TestParserSyncWithFlags(i::Handle<i::String> source, unsigned flags) {
i::Handle<i::Script> script = factory->NewScript(source);
i::CompilationInfoWithZone info(script);
i::Parser parser(&info);
- SET_PARSER_FLAGS(parser, flags);
+ SetParserFlags(&parser, flags);
info.MarkAsGlobal();
- function = parser.ParseProgram();
+ parser.Parse();
+ function = info.function();
}
// Check that preparsing fails iff parsing fails.
@@ -1188,9 +1181,17 @@ void TestParserSyncWithFlags(i::Handle<i::String> source, unsigned flags) {
}
-void TestParserSync(i::Handle<i::String> source) {
- for (unsigned flags = 0; flags < (1 << kParserFlagCount); ++flags) {
- TestParserSyncWithFlags(source, flags);
+void TestParserSync(const char* source,
+ const ParserFlag* flag_list,
+ size_t flag_list_length) {
+ i::Handle<i::String> str =
+ CcTest::i_isolate()->factory()->NewStringFromAscii(i::CStrVector(source));
+ for (int bits = 0; bits < (1 << flag_list_length); bits++) {
+ i::EnumSet<ParserFlag> flags;
+ for (size_t flag_index = 0; flag_index < flag_list_length; flag_index++) {
+ if ((bits & (1 << flag_index)) != 0) flags.Add(flag_list[flag_index]);
+ }
+ TestParserSyncWithFlags(str, flags);
}
}
@@ -1264,17 +1265,18 @@ TEST(ParserSync) {
NULL
};
- i::Isolate* isolate = i::Isolate::Current();
- i::Factory* factory = isolate->factory();
-
- v8::HandleScope handles(v8::Isolate::GetCurrent());
- v8::Handle<v8::Context> context = v8::Context::New(v8::Isolate::GetCurrent());
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
v8::Context::Scope context_scope(context);
int marker;
- isolate->stack_guard()->SetStackLimit(
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+ static const ParserFlag flags1[] = {
+ kAllowLazy, kAllowHarmonyScoping, kAllowModules, kAllowGenerators,
+ kAllowForOf
+ };
for (int i = 0; context_data[i][0] != NULL; ++i) {
for (int j = 0; statement_data[j] != NULL; ++j) {
for (int k = 0; termination_data[k] != NULL; ++k) {
@@ -1294,12 +1296,20 @@ TEST(ParserSync) {
termination_data[k],
context_data[i][1]);
CHECK(length == kProgramSize);
- i::Handle<i::String> source =
- factory->NewStringFromAscii(i::CStrVector(program.start()));
- TestParserSync(source);
+ TestParserSync(program.start(), flags1, ARRAY_SIZE(flags1));
}
}
}
+
+ // Neither Harmony numeric literals nor our natives syntax has any
+ // interaction with the flags above, so test these separately to reduce
+ // the combinatorial explosion.
+ static const ParserFlag flags2[] = { kAllowHarmonyNumericLiterals };
+ TestParserSync("0o1234", flags2, ARRAY_SIZE(flags2));
+ TestParserSync("0b1011", flags2, ARRAY_SIZE(flags2));
+
+ static const ParserFlag flags3[] = { kAllowNativesSyntax };
+ TestParserSync("%DebugPrint(123)", flags3, ARRAY_SIZE(flags3));
}
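
The rewritten TestParserSync enumerates every subset of a flag list with
i::EnumSet instead of the old one-bit-per-enum bitmask macro. The enumeration
idiom from the hunk above, reduced to its core (the flag names are the test's
own enumerators):

    static const ParserFlag flag_list[] = { kAllowLazy, kAllowForOf };
    // Walk every subset of flag_list: bit j of |bits| selects flag_list[j].
    for (int bits = 0; bits < (1 << ARRAY_SIZE(flag_list)); ++bits) {
      i::EnumSet<ParserFlag> flags;
      for (size_t j = 0; j < ARRAY_SIZE(flag_list); ++j) {
        if ((bits & (1 << j)) != 0) flags.Add(flag_list[j]);
      }
      // SetParserFlags() then maps flags.Contains(...) onto the
      // parser->set_allow_*() setters.
    }
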
@@ -1308,9 +1318,9 @@ TEST(PreparserStrictOctal) {
// such (issue 2220).
v8::internal::FLAG_min_preparse_length = 1; // Force preparsing.
v8::V8::Initialize();
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Context::Scope context_scope(
- v8::Context::New(v8::Isolate::GetCurrent()));
+ v8::Context::New(CcTest::isolate()));
v8::TryCatch try_catch;
const char* script =
"\"use strict\"; \n"
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index 079cbd121b..36ad487079 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -70,7 +70,7 @@ void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(StackAlignment) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->Set(v8_str("get_stack_pointer"),
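
Most hunks in these cctest files apply one mechanical substitution: the
deprecated process-global lookups become fixture accessors on CcTest. The
mapping, written out as a sketch (all three accessors appear throughout the
patch):

    v8::Isolate* isolate  = CcTest::isolate();    // was v8::Isolate::GetCurrent()
    i::Isolate* i_isolate = CcTest::i_isolate();  // was i::Isolate::Current()
    i::Heap*    heap      = CcTest::heap();       // was the HEAP macro
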
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 7504b171de..47146ecc48 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -399,7 +399,7 @@ class TestSetup {
TEST(RecordTickSample) {
TestSetup test_setup;
- CpuProfilesCollection profiles(CcTest::i_isolate()->heap());
+ CpuProfilesCollection profiles(CcTest::heap());
profiles.StartProfiling("", 1, false);
ProfileGenerator generator(&profiles);
CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "aaa");
@@ -465,7 +465,7 @@ static void CheckNodeIds(ProfileNode* node, int* expectedId) {
TEST(SampleIds) {
TestSetup test_setup;
- CpuProfilesCollection profiles(CcTest::i_isolate()->heap());
+ CpuProfilesCollection profiles(CcTest::heap());
profiles.StartProfiling("", 1, true);
ProfileGenerator generator(&profiles);
CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "aaa");
@@ -513,7 +513,7 @@ TEST(SampleIds) {
TEST(NoSamples) {
TestSetup test_setup;
- CpuProfilesCollection profiles(CcTest::i_isolate()->heap());
+ CpuProfilesCollection profiles(CcTest::heap());
profiles.StartProfiling("", 1, false);
ProfileGenerator generator(&profiles);
CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "aaa");
@@ -605,14 +605,14 @@ TEST(RecordStackTraceAtStartProfiling) {
// don't appear in the stack trace.
i::FLAG_use_inlining = false;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
const char* extensions[] = { "v8/profiler" };
v8::ExtensionConfiguration config(1, extensions);
v8::Local<v8::Context> context = v8::Context::New(isolate, &config);
context->Enter();
- CpuProfiler* profiler = i::Isolate::Current()->cpu_profiler();
+ CpuProfiler* profiler = CcTest::i_isolate()->cpu_profiler();
CHECK_EQ(0, profiler->GetProfilesCount());
CompileRun(
"function c() { startProfiling(); }\n"
@@ -652,7 +652,7 @@ TEST(RecordStackTraceAtStartProfiling) {
TEST(Issue51919) {
- CpuProfilesCollection collection(CcTest::i_isolate()->heap());
+ CpuProfilesCollection collection(CcTest::heap());
i::EmbeddedVector<char*,
CpuProfilesCollection::kMaxSimultaneousProfiles> titles;
for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i) {
@@ -744,7 +744,7 @@ static const char* line_number_test_source_profile_time_functions =
"function lazy_func_at_6th_line() {}";
int GetFunctionLineNumber(LocalContext* env, const char* name) {
- CpuProfiler* profiler = i::Isolate::Current()->cpu_profiler();
+ CpuProfiler* profiler = CcTest::i_isolate()->cpu_profiler();
CodeMap* code_map = profiler->generator()->code_map();
i::Handle<i::JSFunction> func = v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(
@@ -761,7 +761,7 @@ TEST(LineNumber) {
CcTest::InitializeVM();
LocalContext env;
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = CcTest::i_isolate();
TestSetup test_setup;
i::HandleScope scope(isolate);
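
The profile tests now obtain the heap through the fixture accessor rather than
via the internal isolate at each call site. The setup shared by the tests
above, as a sketch:

    // Heap comes from the fixture; arguments as used in RecordTickSample.
    CpuProfilesCollection profiles(CcTest::heap());
    profiles.StartProfiling("", 1, false);
    ProfileGenerator generator(&profiles);
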
diff --git a/deps/v8/test/cctest/test-random.cc b/deps/v8/test/cctest/test-random.cc
index 4227326a92..ea1f36f24c 100644
--- a/deps/v8/test/cctest/test-random.cc
+++ b/deps/v8/test/cctest/test-random.cc
@@ -69,8 +69,8 @@ void TestSeeds(Handle<JSFunction> fun,
TEST(CrankshaftRandom) {
v8::V8::Initialize();
// Skip test if crankshaft is disabled.
- if (!Isolate::Current()->use_crankshaft()) return;
- v8::Isolate* v8_isolate = v8::Isolate::GetCurrent();
+ if (!CcTest::i_isolate()->use_crankshaft()) return;
+ v8::Isolate* v8_isolate = CcTest::isolate();
v8::HandleScope scope(v8_isolate);
v8::Context::Scope context_scope(v8::Context::New(v8_isolate));
@@ -82,7 +82,7 @@ TEST(CrankshaftRandom) {
CompileRun("function f() { return Math.random(); }");
- Object* string = Isolate::Current()->factory()->InternalizeOneByteString(
+ Object* string = CcTest::i_isolate()->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("f"))->ToObjectChecked();
MaybeObject* fun_object =
context->global_object()->GetProperty(String::cast(string));
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 14989ee980..cc946464b2 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -71,9 +71,9 @@ using namespace v8::internal;
static bool CheckParse(const char* input) {
V8::Initialize(NULL);
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- Zone zone(Isolate::Current());
- FlatStringReader reader(Isolate::Current(), CStrVector(input));
+ v8::HandleScope scope(CcTest::isolate());
+ Zone zone(CcTest::i_isolate());
+ FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
return v8::internal::RegExpParser::ParseRegExp(
&reader, false, &result, &zone);
@@ -82,9 +82,9 @@ static bool CheckParse(const char* input) {
static SmartArrayPointer<const char> Parse(const char* input) {
V8::Initialize(NULL);
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- Zone zone(Isolate::Current());
- FlatStringReader reader(Isolate::Current(), CStrVector(input));
+ v8::HandleScope scope(CcTest::isolate());
+ Zone zone(CcTest::i_isolate());
+ FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
&reader, false, &result, &zone));
@@ -97,9 +97,9 @@ static SmartArrayPointer<const char> Parse(const char* input) {
static bool CheckSimple(const char* input) {
V8::Initialize(NULL);
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- Zone zone(Isolate::Current());
- FlatStringReader reader(Isolate::Current(), CStrVector(input));
+ v8::HandleScope scope(CcTest::isolate());
+ Zone zone(CcTest::i_isolate());
+ FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
&reader, false, &result, &zone));
@@ -116,9 +116,9 @@ struct MinMaxPair {
static MinMaxPair CheckMinMaxMatch(const char* input) {
V8::Initialize(NULL);
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- Zone zone(Isolate::Current());
- FlatStringReader reader(Isolate::Current(), CStrVector(input));
+ v8::HandleScope scope(CcTest::isolate());
+ Zone zone(CcTest::i_isolate());
+ FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
&reader, false, &result, &zone));
@@ -390,9 +390,9 @@ TEST(ParserRegression) {
static void ExpectError(const char* input,
const char* expected) {
V8::Initialize(NULL);
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- Zone zone(Isolate::Current());
- FlatStringReader reader(Isolate::Current(), CStrVector(input));
+ v8::HandleScope scope(CcTest::isolate());
+ Zone zone(CcTest::i_isolate());
+ FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(!v8::internal::RegExpParser::ParseRegExp(
&reader, false, &result, &zone));
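
Each regexp-parser helper above rebuilds the same per-isolate scaffolding. A
minimal standalone version, using only calls from these hunks; the function
name ParsesOk is illustrative:

    static bool ParsesOk(const char* input) {
      v8::HandleScope scope(CcTest::isolate());
      Zone zone(CcTest::i_isolate());  // zones take the internal isolate
      FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
      RegExpCompileData result;
      return v8::internal::RegExpParser::ParseRegExp(
          &reader, false, &result, &zone);
    }
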
@@ -404,7 +404,6 @@ static void ExpectError(const char* input,
TEST(Errors) {
- V8::Initialize(NULL);
const char* kEndBackslash = "\\ at end of pattern";
ExpectError("\\", kEndBackslash);
const char* kUnterminatedGroup = "Unterminated group";
@@ -475,7 +474,7 @@ static bool NotWord(uc16 c) {
static void TestCharacterClassEscapes(uc16 c, bool (pred)(uc16 c)) {
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(2, &zone);
CharacterRange::AddClassEscape(c, ranges, &zone);
@@ -507,7 +506,7 @@ static RegExpNode* Compile(const char* input,
bool is_ascii,
Zone* zone) {
V8::Initialize(NULL);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
FlatStringReader reader(isolate, CStrVector(input));
RegExpCompileData compile_data;
if (!v8::internal::RegExpParser::ParseRegExp(&reader, multiline,
@@ -533,8 +532,8 @@ static void Execute(const char* input,
bool multiline,
bool is_ascii,
bool dot_output = false) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- Zone zone(Isolate::Current());
+ v8::HandleScope scope(CcTest::isolate());
+ Zone zone(CcTest::i_isolate());
RegExpNode* node = Compile(input, multiline, is_ascii, &zone);
USE(node);
#ifdef DEBUG
@@ -574,7 +573,7 @@ static unsigned PseudoRandom(int i, int j) {
TEST(SplayTreeSimple) {
v8::internal::V8::Initialize(NULL);
static const unsigned kLimit = 1000;
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
ZoneSplayTree<TestConfig> tree(&zone);
bool seen[kLimit];
for (unsigned i = 0; i < kLimit; i++) seen[i] = false;
@@ -642,7 +641,7 @@ TEST(DispatchTableConstruction) {
}
}
// Enter test data into dispatch table.
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
DispatchTable table(&zone);
for (int i = 0; i < kRangeCount; i++) {
uc16* range = ranges[i];
@@ -710,8 +709,8 @@ typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
class ContextInitializer {
public:
ContextInitializer()
- : scope_(v8::Isolate::GetCurrent()),
- env_(v8::Context::New(v8::Isolate::GetCurrent())) {
+ : scope_(CcTest::isolate()),
+ env_(v8::Context::New(CcTest::isolate())) {
env_->Enter();
}
~ContextInitializer() {
@@ -737,14 +736,14 @@ static ArchRegExpMacroAssembler::Result Execute(Code* code,
input_end,
captures,
0,
- Isolate::Current());
+ CcTest::i_isolate());
}
TEST(MacroAssemblerNativeSuccess) {
v8::V8::Initialize();
ContextInitializer initializer;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
@@ -781,7 +780,7 @@ TEST(MacroAssemblerNativeSuccess) {
TEST(MacroAssemblerNativeSimple) {
v8::V8::Initialize();
ContextInitializer initializer;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
@@ -847,7 +846,7 @@ TEST(MacroAssemblerNativeSimple) {
TEST(MacroAssemblerNativeSimpleUC16) {
v8::V8::Initialize();
ContextInitializer initializer;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
@@ -918,7 +917,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
TEST(MacroAssemblerNativeBacktrack) {
v8::V8::Initialize();
ContextInitializer initializer;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
@@ -958,7 +957,7 @@ TEST(MacroAssemblerNativeBacktrack) {
TEST(MacroAssemblerNativeBackReferenceASCII) {
v8::V8::Initialize();
ContextInitializer initializer;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
@@ -1007,7 +1006,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
TEST(MacroAssemblerNativeBackReferenceUC16) {
v8::V8::Initialize();
ContextInitializer initializer;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
@@ -1059,7 +1058,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
TEST(MacroAssemblernativeAtStart) {
v8::V8::Initialize();
ContextInitializer initializer;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
@@ -1118,7 +1117,7 @@ TEST(MacroAssemblernativeAtStart) {
TEST(MacroAssemblerNativeBackRefNoCase) {
v8::V8::Initialize();
ContextInitializer initializer;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
@@ -1177,7 +1176,7 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
TEST(MacroAssemblerNativeRegisters) {
v8::V8::Initialize();
ContextInitializer initializer;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
@@ -1280,7 +1279,7 @@ TEST(MacroAssemblerNativeRegisters) {
TEST(MacroAssemblerStackOverflow) {
v8::V8::Initialize();
ContextInitializer initializer;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
@@ -1319,7 +1318,7 @@ TEST(MacroAssemblerStackOverflow) {
TEST(MacroAssemblerNativeLotsOfRegisters) {
v8::V8::Initialize();
ContextInitializer initializer;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Zone zone(isolate);
@@ -1370,7 +1369,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
TEST(MacroAssembler) {
V8::Initialize(NULL);
byte codes[1024];
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
RegExpMacroAssemblerIrregexp m(Vector<byte>(codes, 1024), &zone);
// ^f(o)o.
Label start, fail, backtrack;
@@ -1403,7 +1402,7 @@ TEST(MacroAssembler) {
m.PopRegister(0);
m.Fail();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
@@ -1438,7 +1437,7 @@ TEST(AddInverseToTable) {
static const int kLimit = 1000;
static const int kRangeCount = 16;
for (int t = 0; t < 10; t++) {
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(kRangeCount, &zone);
for (int i = 0; i < kRangeCount; i++) {
@@ -1459,7 +1458,7 @@ TEST(AddInverseToTable) {
CHECK_EQ(is_on, set->Get(0) == false);
}
}
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(1, &zone);
ranges->Add(CharacterRange(0xFFF0, 0xFFFE), &zone);
@@ -1572,7 +1571,7 @@ TEST(UncanonicalizeEquivalence) {
static void TestRangeCaseIndependence(CharacterRange input,
Vector<CharacterRange> expected) {
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
int count = expected.length();
ZoneList<CharacterRange>* list =
new(&zone) ZoneList<CharacterRange>(count, &zone);
@@ -1637,7 +1636,7 @@ static bool InClass(uc16 c, ZoneList<CharacterRange>* ranges) {
TEST(CharClassDifference) {
v8::internal::V8::Initialize(NULL);
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
ZoneList<CharacterRange>* base =
new(&zone) ZoneList<CharacterRange>(1, &zone);
base->Add(CharacterRange::Everything(), &zone);
@@ -1665,7 +1664,7 @@ TEST(CharClassDifference) {
TEST(CanonicalizeCharacterSets) {
v8::internal::V8::Initialize(NULL);
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
ZoneList<CharacterRange>* list =
new(&zone) ZoneList<CharacterRange>(4, &zone);
CharacterSet set(list);
@@ -1727,7 +1726,7 @@ TEST(CanonicalizeCharacterSets) {
TEST(CharacterRangeMerge) {
v8::internal::V8::Initialize(NULL);
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
ZoneList<CharacterRange> l1(4, &zone);
ZoneList<CharacterRange> l2(4, &zone);
// Create all combinations of intersections of ranges, both singletons and
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 099f3a05a9..4132d2d4cf 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -84,7 +84,7 @@ static int* counter_function(const char* name) {
template <class T>
static Address AddressOf(T id) {
- return ExternalReference(id, i::Isolate::Current()).address();
+ return ExternalReference(id, CcTest::i_isolate()).address();
}
@@ -100,7 +100,7 @@ static int make_code(TypeCode type, int id) {
TEST(ExternalReferenceEncoder) {
- Isolate* isolate = i::Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
isolate->stats_table()->SetCounterFunction(counter_function);
v8::V8::Initialize();
@@ -137,7 +137,7 @@ TEST(ExternalReferenceEncoder) {
TEST(ExternalReferenceDecoder) {
- Isolate* isolate = i::Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
isolate->stats_table()->SetCounterFunction(counter_function);
v8::V8::Initialize();
@@ -251,20 +251,22 @@ static void Serialize() {
// can be loaded from v8natives.js and their addresses can be processed. This
// will clear the pending fixups array, which would otherwise contain GC roots
// that would confuse the serialization/deserialization process.
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
{
v8::HandleScope scope(isolate);
v8::Context::New(isolate);
}
- WriteToFile(reinterpret_cast<Isolate*>(isolate),
- FLAG_testing_serialization_file);
+
+ Isolate* internal_isolate = CcTest::i_isolate();
+ internal_isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "serialize");
+ WriteToFile(internal_isolate, FLAG_testing_serialization_file);
}
// Test that the whole heap can be serialized.
TEST(Serialize) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- Serializer::Enable(Isolate::Current());
+ Serializer::Enable(CcTest::i_isolate());
v8::V8::Initialize();
Serialize();
}
@@ -274,7 +276,7 @@ TEST(Serialize) {
// Test that heap serialization is non-destructive.
TEST(SerializeTwice) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- Serializer::Enable(Isolate::Current());
+ Serializer::Enable(CcTest::i_isolate());
v8::V8::Initialize();
Serialize();
Serialize();
@@ -291,14 +293,14 @@ static void Deserialize() {
static void SanityCheck() {
- Isolate* isolate = Isolate::Current();
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ Isolate* isolate = CcTest::i_isolate();
+ v8::HandleScope scope(CcTest::isolate());
#ifdef VERIFY_HEAP
- HEAP->Verify();
+ CcTest::heap()->Verify();
#endif
CHECK(isolate->global_object()->IsJSObject());
CHECK(isolate->native_context()->IsContext());
- CHECK(HEAP->string_table()->IsStringTable());
+ CHECK(CcTest::heap()->string_table()->IsStringTable());
CHECK(!isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("Empty"))->IsFailure());
}
@@ -309,7 +311,7 @@ DEPENDENT_TEST(Deserialize, Serialize) {
// serialization. That doesn't matter. We don't need to be able to
// serialize a snapshot in a VM that is booted from a snapshot.
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Deserialize();
@@ -323,7 +325,7 @@ DEPENDENT_TEST(Deserialize, Serialize) {
DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Deserialize();
@@ -337,7 +339,7 @@ DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Deserialize();
@@ -355,7 +357,7 @@ DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
SerializeTwice) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Deserialize();
@@ -372,7 +374,7 @@ DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
TEST(PartialSerialization) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Serializer::Enable(isolate);
v8::V8::Initialize();
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
@@ -495,7 +497,7 @@ DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
int snapshot_size = 0;
byte* snapshot = ReadBytes(file_name, &snapshot_size);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Object* root;
{
SnapshotByteSource source(snapshot, snapshot_size);
@@ -523,7 +525,7 @@ DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
TEST(ContextSerialization) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Serializer::Enable(isolate);
v8::V8::Initialize();
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
@@ -607,7 +609,7 @@ DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
int snapshot_size = 0;
byte* snapshot = ReadBytes(file_name, &snapshot_size);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Object* root;
{
SnapshotByteSource source(snapshot, snapshot_size);
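
The Serialize() helper now performs a full GC before writing the startup
snapshot, presumably so that garbage created while bootstrapping the throwaway
context does not end up serialized; this fits the new collector's behavior
described in the commit message. The new tail of the helper:

    Isolate* internal_isolate = CcTest::i_isolate();
    // Compact the heap before serializing it.
    internal_isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "serialize");
    WriteToFile(internal_isolate, FLAG_testing_serialization_file);
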
diff --git a/deps/v8/test/cctest/test-spaces.cc b/deps/v8/test/cctest/test-spaces.cc
index 3326a015de..73710658a2 100644
--- a/deps/v8/test/cctest/test-spaces.cc
+++ b/deps/v8/test/cctest/test-spaces.cc
@@ -72,7 +72,7 @@ TEST(Page) {
Page* p = Page::FromAddress(page_start);
// Initialized Page has heap pointer, normally set by memory_allocator.
- p->heap_ = HEAP;
+ p->heap_ = CcTest::heap();
CHECK(p->address() == page_start);
CHECK(p->is_valid());
@@ -207,7 +207,7 @@ static unsigned int Pseudorandom() {
TEST(MemoryChunk) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
isolate->InitializeLoggingAndCounters();
Heap* heap = isolate->heap();
CHECK(heap->ConfigureHeapDefault());
@@ -263,7 +263,7 @@ TEST(MemoryChunk) {
TEST(MemoryAllocator) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
isolate->InitializeLoggingAndCounters();
Heap* heap = isolate->heap();
CHECK(isolate->heap()->ConfigureHeapDefault());
@@ -312,7 +312,7 @@ TEST(MemoryAllocator) {
TEST(NewSpace) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
isolate->InitializeLoggingAndCounters();
Heap* heap = isolate->heap();
CHECK(heap->ConfigureHeapDefault());
@@ -323,8 +323,8 @@ TEST(NewSpace) {
NewSpace new_space(heap);
- CHECK(new_space.SetUp(HEAP->ReservedSemiSpaceSize(),
- HEAP->ReservedSemiSpaceSize()));
+ CHECK(new_space.SetUp(CcTest::heap()->ReservedSemiSpaceSize(),
+ CcTest::heap()->ReservedSemiSpaceSize()));
CHECK(new_space.HasBeenSetUp());
while (new_space.Available() >= Page::kMaxNonCodeHeapObjectSize) {
@@ -341,7 +341,7 @@ TEST(NewSpace) {
TEST(OldSpace) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
isolate->InitializeLoggingAndCounters();
Heap* heap = isolate->heap();
CHECK(heap->ConfigureHeapDefault());
@@ -372,7 +372,7 @@ TEST(OldSpace) {
TEST(LargeObjectSpace) {
v8::V8::Initialize();
- LargeObjectSpace* lo = HEAP->lo_space();
+ LargeObjectSpace* lo = CcTest::heap()->lo_space();
CHECK(lo != NULL);
int lo_size = Page::kPageSize;
@@ -400,3 +400,25 @@ TEST(LargeObjectSpace) {
CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
}
+
+
+TEST(SizeOfFirstPageIsLargeEnough) {
+ if (i::FLAG_always_opt) return;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+
+ // Freshly initialized VM gets by with one page per space.
+ for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
+ CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
+ }
+
+ // Executing the empty script gets by with one page per space.
+ HandleScope scope(isolate);
+ CompileRun("/*empty*/");
+ for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
+ CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
+ }
+
+ // No large objects required to perform the above steps.
+ CHECK(isolate->heap()->lo_space()->IsEmpty());
+}
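
The new SizeOfFirstPageIsLargeEnough test asserts that both a fresh VM and the
empty script fit in a single page per paged space, iterating the spaces by
index:

    for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
      CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
    }
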
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 9049df1b72..4aa74a8191 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -137,7 +137,7 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
Zone* zone) {
// A list of pointers that we don't have any interest in cleaning up.
// If they are reachable from a root then leak detection won't complain.
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
for (int i = 0; i < bb_length; i++) {
int len = rng->next(16);
@@ -290,7 +290,7 @@ ConsStringGenerationData::ConsStringGenerationData(bool long_blocks,
rng_.init();
InitializeBuildingBlocks(
building_blocks_, kNumberOfBuildingBlocks, long_blocks, &rng_, zone);
- empty_string_ = Isolate::Current()->heap()->empty_string();
+ empty_string_ = CcTest::heap()->empty_string();
Reset();
}
@@ -403,7 +403,7 @@ void VerifyConsString(Handle<String> root, ConsStringGenerationData* data) {
static Handle<String> ConstructRandomString(ConsStringGenerationData* data,
unsigned max_recursion) {
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = CcTest::i_isolate()->factory();
// Compute termination characteristics.
bool terminate = false;
bool flat = data->rng_.next(data->empty_leaf_threshold_);
@@ -465,7 +465,7 @@ static Handle<String> ConstructRandomString(ConsStringGenerationData* data,
static Handle<String> ConstructLeft(
ConsStringGenerationData* data,
int depth) {
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = CcTest::i_isolate()->factory();
Handle<String> answer = factory->NewStringFromAscii(CStrVector(""));
data->stats_.leaves_++;
for (int i = 0; i < depth; i++) {
@@ -483,7 +483,7 @@ static Handle<String> ConstructLeft(
static Handle<String> ConstructRight(
ConsStringGenerationData* data,
int depth) {
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = CcTest::i_isolate()->factory();
Handle<String> answer = factory->NewStringFromAscii(CStrVector(""));
data->stats_.leaves_++;
for (int i = depth - 1; i >= 0; i--) {
@@ -502,7 +502,7 @@ static Handle<String> ConstructBalancedHelper(
ConsStringGenerationData* data,
int from,
int to) {
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = CcTest::i_isolate()->factory();
CHECK(to > from);
if (to - from == 1) {
data->stats_.chars_ += data->block(from)->length();
@@ -571,7 +571,7 @@ TEST(Traverse) {
printf("TestTraverse\n");
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- Zone zone(Isolate::Current());
+ Zone zone(CcTest::i_isolate());
ConsStringGenerationData data(false, &zone);
Handle<String> flat = ConstructBalanced(&data);
FlattenString(flat);
@@ -659,7 +659,7 @@ printf(
template<typename BuildString>
void TestStringCharacterStream(BuildString build, int test_cases) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
Zone zone(isolate);
ConsStringGenerationData data(true, &zone);
@@ -697,7 +697,7 @@ static const int kCharacterStreamNonRandomCases = 8;
static Handle<String> BuildEdgeCaseConsString(
int test_case, ConsStringGenerationData* data) {
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = CcTest::i_isolate()->factory();
data->Reset();
switch (test_case) {
case 0:
@@ -860,7 +860,7 @@ static const int DEEP_ASCII_DEPTH = 100000;
TEST(DeepAscii) {
printf("TestDeepAscii\n");
CcTest::InitializeVM();
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
char* foo = NewArray<char>(DEEP_ASCII_DEPTH);
@@ -930,10 +930,10 @@ TEST(Utf8Conversion) {
TEST(ExternalShortStringAdd) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Zone zone(isolate);
- CcTest::InitializeVM();
+ LocalContext context;
v8::HandleScope handle_scope(CcTest::isolate());
// Make sure we cover all always-flat lengths and at least one above.
@@ -974,7 +974,7 @@ TEST(ExternalShortStringAdd) {
}
// Add the arrays with the short external strings in the global object.
- v8::Handle<v8::Object> global = CcTest::env()->Global();
+ v8::Handle<v8::Object> global = context->Global();
global->Set(v8_str("external_ascii"), ascii_external_strings);
global->Set(v8_str("external_non_ascii"), non_ascii_external_strings);
global->Set(v8_str("max_length"), v8::Integer::New(kMaxLength));
@@ -1018,9 +1018,9 @@ TEST(ExternalShortStringAdd) {
TEST(JSONStringifySliceMadeExternal) {
- Isolate* isolate = Isolate::Current();
- Zone zone(isolate);
CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Zone zone(isolate);
// Create a sliced string from a one-byte string. The latter is turned
// into a two-byte external string. Check that JSON.stringify works.
v8::HandleScope handle_scope(CcTest::isolate());
@@ -1048,13 +1048,13 @@ TEST(JSONStringifySliceMadeExternal) {
TEST(CachedHashOverflow) {
+ CcTest::InitializeVM();
// We incorrectly allowed strings to be tagged as array indices even if their
// values didn't fit in the hash field.
// See http://code.google.com/p/v8/issues/detail?id=728
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
Zone zone(isolate);
- CcTest::InitializeVM();
v8::HandleScope handle_scope(CcTest::isolate());
// Lines must be executed sequentially. Combining them into one script
// makes the bug go away.
@@ -1098,7 +1098,7 @@ TEST(CachedHashOverflow) {
TEST(SliceFromCons) {
FLAG_string_slices = true;
CcTest::InitializeVM();
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
Handle<String> string =
factory->NewStringFromAscii(CStrVector("parentparentparent"));
@@ -1133,7 +1133,7 @@ class AsciiVectorResource : public v8::String::ExternalAsciiStringResource {
TEST(SliceFromExternal) {
FLAG_string_slices = true;
CcTest::InitializeVM();
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
AsciiVectorResource resource(
i::Vector<const char>("abcdefghijklmnopqrstuvwxyz", 26));
@@ -1153,7 +1153,7 @@ TEST(TrivialSlice) {
// actually creates a new string (it should not).
FLAG_string_slices = true;
CcTest::InitializeVM();
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Value> result;
Handle<String> string;
@@ -1227,7 +1227,7 @@ TEST(AsciiArrayJoin) {
"for (var i = 1; i <= two_14; i++) a.push(s);"
"a.join("");";
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext context;
v8::V8::IgnoreOutOfMemoryException();
v8::Local<v8::Script> script =
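
Several string tests (JSONStringifySliceMadeExternal, CachedHashOverflow) had
their setup reordered so that nothing isolate-derived is created before the VM
exists. The safe ordering the patch converges on:

    CcTest::InitializeVM();                  // must run first
    Isolate* isolate = CcTest::i_isolate();  // only valid afterwards
    Zone zone(isolate);
    v8::HandleScope handle_scope(CcTest::isolate());
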
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index 6a8323bea4..a04ffa70c5 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -15,7 +15,7 @@ using namespace v8::internal;
TEST(Create) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
const int kNumSymbols = 30;
@@ -37,8 +37,8 @@ TEST(Create) {
#endif
}
- HEAP->PerformScavenge();
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
// All symbols should be distinct.
for (int i = 0; i < kNumSymbols; ++i) {
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index b89f3ef8cf..13f594096f 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -39,8 +39,8 @@ void Signal(const v8::FunctionCallbackInfo<v8::Value>& args) {
void TerminateCurrentThread(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(!v8::V8::IsExecutionTerminating());
- v8::V8::TerminateExecution();
+ CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
+ v8::V8::TerminateExecution(args.GetIsolate());
}
@@ -50,18 +50,18 @@ void Fail(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Loop(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
v8::Handle<v8::String> source =
v8::String::New("try { doloop(); fail(); } catch(e) { fail(); }");
v8::Handle<v8::Value> result = v8::Script::Compile(source)->Run();
CHECK(result.IsEmpty());
- CHECK(v8::V8::IsExecutionTerminating());
+ CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
}
void DoLoop(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
v8::Script::Compile(v8::String::New("function f() {"
" var term = true;"
" try {"
@@ -79,13 +79,13 @@ void DoLoop(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
- CHECK(v8::V8::IsExecutionTerminating());
+ CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
}
void DoLoopNoCall(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
v8::Script::Compile(v8::String::New("var term = true;"
"while(true) {"
" if (term) terminate();"
@@ -95,7 +95,7 @@ void DoLoopNoCall(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
- CHECK(v8::V8::IsExecutionTerminating());
+ CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
}
@@ -115,19 +115,19 @@ v8::Handle<v8::ObjectTemplate> CreateGlobalTemplate(
// Test that a single thread of JavaScript execution can terminate
// itself.
TEST(TerminateOnlyV8ThreadFromThreadItself) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> global =
CreateGlobalTemplate(TerminateCurrentThread, DoLoop);
v8::Handle<v8::Context> context =
- v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
+ v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
// Run a loop that will be infinite if thread termination does not work.
v8::Handle<v8::String> source =
v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
v8::Script::Compile(source)->Run();
// Test that we can run the code again after thread termination.
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
v8::Script::Compile(source)->Run();
}
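
IsExecutionTerminating() and TerminateExecution() now take the isolate
explicitly throughout this file. The per-isolate form of a self-terminating
callback, as in the first hunk of this file; the name TerminateSelf is
illustrative:

    void TerminateSelf(const v8::FunctionCallbackInfo<v8::Value>& args) {
      CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
      v8::V8::TerminateExecution(args.GetIsolate());
    }
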
@@ -135,18 +135,18 @@ TEST(TerminateOnlyV8ThreadFromThreadItself) {
// Test that a single thread of JavaScript execution can terminate
// itself in a loop that performs no calls.
TEST(TerminateOnlyV8ThreadFromThreadItselfNoLoop) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> global =
CreateGlobalTemplate(TerminateCurrentThread, DoLoopNoCall);
v8::Handle<v8::Context> context =
- v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
+ v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
// Run a loop that will be infinite if thread termination does not work.
v8::Handle<v8::String> source =
v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
v8::Script::Compile(source)->Run();
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
// Test that we can run the code again after thread termination.
v8::Script::Compile(source)->Run();
}
@@ -172,15 +172,15 @@ class TerminatorThread : public v8::internal::Thread {
// from the side by another thread.
TEST(TerminateOnlyV8ThreadFromOtherThread) {
semaphore = new v8::internal::Semaphore(0);
- TerminatorThread thread(i::Isolate::Current());
+ TerminatorThread thread(CcTest::i_isolate());
thread.Start();
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(Signal, DoLoop);
v8::Handle<v8::Context> context =
- v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
+ v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
// Run a loop that will be infinite if thread termination does not work.
v8::Handle<v8::String> source =
v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
@@ -197,8 +197,8 @@ int call_count = 0;
void TerminateOrReturnObject(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (++call_count == 10) {
- CHECK(!v8::V8::IsExecutionTerminating());
- v8::V8::TerminateExecution();
+ CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
+ v8::V8::TerminateExecution(args.GetIsolate());
return;
}
v8::Local<v8::Object> result = v8::Object::New();
@@ -209,7 +209,7 @@ void TerminateOrReturnObject(const v8::FunctionCallbackInfo<v8::Value>& args) {
void LoopGetProperty(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
v8::Script::Compile(v8::String::New("function f() {"
" try {"
" while(true) {"
@@ -225,14 +225,14 @@ void LoopGetProperty(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
- CHECK(v8::V8::IsExecutionTerminating());
+ CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
}
// Test that we correctly handle termination exceptions if they are
// triggered by the creation of error objects in connection with ICs.
TEST(TerminateLoadICException) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
global->Set(v8::String::New("terminate_or_return_object"),
v8::FunctionTemplate::New(TerminateOrReturnObject));
@@ -241,16 +241,16 @@ TEST(TerminateLoadICException) {
v8::FunctionTemplate::New(LoopGetProperty));
v8::Handle<v8::Context> context =
- v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
+ v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
// Run a loop that will be infinite if thread termination does not work.
v8::Handle<v8::String> source =
v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
call_count = 0;
v8::Script::Compile(source)->Run();
// Test that we can run the code again after thread termination.
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
call_count = 0;
v8::Script::Compile(source)->Run();
}
@@ -258,7 +258,7 @@ TEST(TerminateLoadICException) {
void ReenterAfterTermination(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
v8::Script::Compile(v8::String::New("function f() {"
" var term = true;"
" try {"
@@ -276,7 +276,7 @@ void ReenterAfterTermination(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
- CHECK(v8::V8::IsExecutionTerminating());
+ CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
v8::Script::Compile(v8::String::New("function f() { fail(); } f()"))->Run();
}
@@ -284,17 +284,17 @@ void ReenterAfterTermination(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Test that reentry into V8 while the termination exception is still pending
// (has not yet unwound the 0-level JS frame) does not crash.
TEST(TerminateAndReenterFromThreadItself) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::ObjectTemplate> global =
CreateGlobalTemplate(TerminateCurrentThread, ReenterAfterTermination);
v8::Handle<v8::Context> context =
- v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
+ v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating());
v8::Handle<v8::String> source =
v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
v8::Script::Compile(source)->Run();
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
// Check we can run JS again after termination.
CHECK(v8::Script::Compile(v8::String::New("function f() { return true; }"
"f()"))->Run()->IsTrue());
@@ -316,7 +316,7 @@ void DoLoopCancelTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(!try_catch.CanContinue());
CHECK(v8::V8::IsExecutionTerminating());
CHECK(try_catch.HasTerminated());
- v8::V8::CancelTerminateExecution(v8::Isolate::GetCurrent());
+ v8::V8::CancelTerminateExecution(CcTest::isolate());
CHECK(!v8::V8::IsExecutionTerminating());
}
@@ -324,13 +324,13 @@ void DoLoopCancelTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Test that a single thread of JavaScript execution can terminate
// itself and then resume execution.
TEST(TerminateCancelTerminateFromThreadItself) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Handle<v8::ObjectTemplate> global =
CreateGlobalTemplate(TerminateCurrentThread, DoLoopCancelTerminate);
v8::Handle<v8::Context> context = v8::Context::New(isolate, NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
v8::Handle<v8::String> source =
v8::String::New("try { doloop(); } catch(e) { fail(); } 'completed';");
// Check that execution completed with correct return value.
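[Editor's note] The hunks above move the termination tests from the implicit current-isolate overloads to the isolate-parameterized ones. A minimal sketch of that API surface, assuming a v8.h from this 3.22 line and an isolate that is already entered; the function name is illustrative, not part of V8:

#include <v8.h>

// Sketch: drive the isolate-scoped termination API exercised above.
void TerminateAndMaybeResume(v8::Isolate* isolate) {
  // Request termination of whatever JS is running on this isolate.
  v8::V8::TerminateExecution(isolate);
  // While the termination exception is still propagating, this is true
  // (the tests above check it inside native callbacks).
  if (v8::V8::IsExecutionTerminating(isolate)) {
    // Optionally abandon the pending termination and let JS resume.
    v8::V8::CancelTerminateExecution(isolate);
  }
}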
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index 6cc5f52338..4709961636 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -34,21 +34,21 @@
TEST(Preemption) {
- v8::Isolate* isolate = CcTest::default_isolate();
+ v8::Isolate* isolate = CcTest::isolate();
v8::Locker locker(isolate);
v8::V8::Initialize();
v8::HandleScope scope(isolate);
v8::Handle<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- v8::Locker::StartPreemption(100);
+ v8::Locker::StartPreemption(isolate, 100);
v8::Handle<v8::Script> script = v8::Script::Compile(
v8::String::New("var count = 0; var obj = new Object(); count++;\n"));
script->Run();
- v8::Locker::StopPreemption();
+ v8::Locker::StopPreemption(isolate);
v8::internal::OS::Sleep(500); // Make sure the timer fires.
script->Run();
@@ -69,8 +69,9 @@ class ThreadA : public v8::internal::Thread {
public:
ThreadA() : Thread("ThreadA") { }
void Run() {
- v8::Isolate* isolate = CcTest::default_isolate();
+ v8::Isolate* isolate = CcTest::isolate();
v8::Locker locker(isolate);
+ v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope scope(isolate);
v8::Handle<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
@@ -90,7 +91,7 @@ class ThreadA : public v8::internal::Thread {
turn = CLEAN_CACHE;
do {
{
- v8::Unlocker unlocker(CcTest::default_isolate());
+ v8::Unlocker unlocker(CcTest::isolate());
Thread::YieldCPU();
}
} while (turn != SECOND_TIME_FILL_CACHE);
@@ -109,15 +110,16 @@ class ThreadB : public v8::internal::Thread {
void Run() {
do {
{
- v8::Isolate* isolate = CcTest::default_isolate();
+ v8::Isolate* isolate = CcTest::isolate();
v8::Locker locker(isolate);
+ v8::Isolate::Scope isolate_scope(isolate);
if (turn == CLEAN_CACHE) {
v8::HandleScope scope(isolate);
v8::Handle<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
// Clear the caches by forcing major GC.
- HEAP->CollectAllGarbage(v8::internal::Heap::kNoGCFlags);
+ CcTest::heap()->CollectAllGarbage(v8::internal::Heap::kNoGCFlags);
turn = SECOND_TIME_FILL_CACHE;
break;
}
@@ -130,8 +132,6 @@ class ThreadB : public v8::internal::Thread {
TEST(JSFunctionResultCachesInTwoThreads) {
- v8::V8::Initialize();
-
ThreadA threadA;
ThreadB threadB;
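[Editor's note] The ThreadA/ThreadB hunks add an explicit v8::Isolate::Scope after taking the Locker, since the shared CcTest isolate is no longer implicitly current on every thread. A sketch of the per-thread entry sequence used above, under the same 3.22-era API assumption:

#include <v8.h>

// Sketch: touching a shared isolate from a worker thread -- lock first,
// make the isolate current, then open handle and context scopes.
void RunOnSharedIsolate(v8::Isolate* isolate) {
  v8::Locker locker(isolate);                 // serialize access to the isolate
  v8::Isolate::Scope isolate_scope(isolate);  // make it this thread's current isolate
  v8::HandleScope handle_scope(isolate);
  v8::Handle<v8::Context> context = v8::Context::New(isolate);
  v8::Context::Scope context_scope(context);
  // ... compile and run scripts here ...
}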
diff --git a/deps/v8/test/cctest/test-time.cc b/deps/v8/test/cctest/test-time.cc
index 8b92d8d32a..28d647a5c1 100644
--- a/deps/v8/test/cctest/test-time.cc
+++ b/deps/v8/test/cctest/test-time.cc
@@ -133,7 +133,7 @@ TEST(TimeTicksIsMonotonic) {
timer.Start();
while (!timer.HasExpired(TimeDelta::FromMilliseconds(100))) {
TimeTicks normal_ticks = TimeTicks::Now();
- TimeTicks highres_ticks = TimeTicks::HighResNow();
+ TimeTicks highres_ticks = TimeTicks::HighResolutionNow();
CHECK_GE(normal_ticks, previous_normal_ticks);
CHECK_GE((normal_ticks - previous_normal_ticks).InMicroseconds(), 0);
CHECK_GE(highres_ticks, previous_highres_ticks);
@@ -142,3 +142,54 @@ TEST(TimeTicksIsMonotonic) {
previous_highres_ticks = highres_ticks;
}
}
+
+
+template <typename T>
+static void ResolutionTest(T (*Now)(), TimeDelta target_granularity) {
+ // We're trying to measure that intervals increment in a VERY small amount
+ // of time -- according to the specified target granularity. Unfortunately,
+ // if we happen to have a context switch in the middle of our test, the
+ // context switch could easily exceed our limit. So, we iterate on this
+ // several times. As long as we're able to detect the fine-granularity
+ // timers at least once, then the test has succeeded.
+ static const TimeDelta kExpirationTimeout = TimeDelta::FromSeconds(1);
+ ElapsedTimer timer;
+ timer.Start();
+ TimeDelta delta;
+ do {
+ T start = Now();
+ T now = start;
+ // Loop until we can detect that the clock has changed. Non-HighRes timers
+ // will increment in chunks, i.e. 15ms. By spinning until we see a clock
+ // change, we detect the minimum time between measurements.
+ do {
+ now = Now();
+ delta = now - start;
+ } while (now <= start);
+ CHECK_NE(static_cast<int64_t>(0), delta.InMicroseconds());
+ } while (delta > target_granularity && !timer.HasExpired(kExpirationTimeout));
+ CHECK_LE(delta, target_granularity);
+}
+
+
+TEST(TimeNowResolution) {
+ // We assume that Time::Now() has at least 16ms resolution.
+ static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
+ ResolutionTest<Time>(&Time::Now, kTargetGranularity);
+}
+
+
+TEST(TimeTicksNowResolution) {
+ // We assume that TimeTicks::Now() has at least 16ms resolution.
+ static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
+ ResolutionTest<TimeTicks>(&TimeTicks::Now, kTargetGranularity);
+}
+
+
+TEST(TimeTicksHighResolutionNowResolution) {
+ if (!TimeTicks::IsHighResolutionClockWorking()) return;
+
+ // We assume that TimeTicks::HighResolutionNow() has sub-ms resolution.
+ static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(1);
+ ResolutionTest<TimeTicks>(&TimeTicks::HighResolutionNow, kTargetGranularity);
+}
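[Editor's note] The new ResolutionTest helper measures timer granularity by spinning until the clock visibly advances, retrying under a deadline because a context switch can inflate a single sample. The same technique in portable C++ (a sketch using std::chrono rather than V8's TimeTicks; the 16 ms bound mirrors the assumption stated in the tests):

#include <cassert>
#include <chrono>

// Sketch: smallest observable step of steady_clock, found by spinning
// until two consecutive readings differ.
std::chrono::nanoseconds MeasureGranularity() {
  auto start = std::chrono::steady_clock::now();
  auto now = start;
  do {
    now = std::chrono::steady_clock::now();
  } while (now <= start);  // spin until the clock ticks
  return now - start;
}

// Usage mirroring TimeNowResolution (single-sample version; real tests
// retry for up to a second before trusting the measurement):
// assert(MeasureGranularity() <= std::chrono::milliseconds(16));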
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index b5f65954fa..264d2ed881 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -111,8 +111,8 @@ class HandlifiedTypes {
Null(Type::Null(), isolate),
Undefined(Type::Undefined(), isolate),
Number(Type::Number(), isolate),
- Integer31(Type::Smi(), isolate),
- Integer32(Type::Signed32(), isolate),
+ Smi(Type::Smi(), isolate),
+ Signed32(Type::Signed32(), isolate),
Double(Type::Double(), isolate),
Name(Type::Name(), isolate),
UniqueName(Type::UniqueName(), isolate),
@@ -128,16 +128,18 @@ class HandlifiedTypes {
array_map(isolate->factory()->NewMap(JS_ARRAY_TYPE, 4 * kPointerSize)),
isolate_(isolate) {
smi = handle(Smi::FromInt(666), isolate);
+ signed32 = isolate->factory()->NewHeapNumber(0x40000000);
object1 = isolate->factory()->NewJSObjectFromMap(object_map);
object2 = isolate->factory()->NewJSObjectFromMap(object_map);
array = isolate->factory()->NewJSArray(20);
- ObjectClass = handle(Type::Class(object_map), isolate);
- ArrayClass = handle(Type::Class(array_map), isolate);
- Integer31Constant = handle(Type::Constant(smi, isolate), isolate);
- ObjectConstant1 = handle(Type::Constant(object1), isolate);
- ObjectConstant2 = handle(Type::Constant(object2), isolate);
- ArrayConstant1 = handle(Type::Constant(array), isolate);
- ArrayConstant2 = handle(Type::Constant(array), isolate);
+ ObjectClass = Class(object_map);
+ ArrayClass = Class(array_map);
+ SmiConstant = Constant(smi);
+ Signed32Constant = Constant(signed32);
+ ObjectConstant1 = Constant(object1);
+ ObjectConstant2 = Constant(object2);
+ ArrayConstant1 = Constant(array);
+ ArrayConstant2 = Constant(array);
}
Handle<Type> None;
@@ -147,8 +149,8 @@ class HandlifiedTypes {
Handle<Type> Null;
Handle<Type> Undefined;
Handle<Type> Number;
- Handle<Type> Integer31;
- Handle<Type> Integer32;
+ Handle<Type> Smi;
+ Handle<Type> Signed32;
Handle<Type> Double;
Handle<Type> Name;
Handle<Type> UniqueName;
@@ -164,7 +166,8 @@ class HandlifiedTypes {
Handle<Type> ObjectClass;
Handle<Type> ArrayClass;
- Handle<Type> Integer31Constant;
+ Handle<Type> SmiConstant;
+ Handle<Type> Signed32Constant;
Handle<Type> ObjectConstant1;
Handle<Type> ObjectConstant2;
Handle<Type> ArrayConstant1;
@@ -173,11 +176,18 @@ class HandlifiedTypes {
Handle<Map> object_map;
Handle<Map> array_map;
- Handle<v8::internal::Smi> smi;
+ Handle<i::Smi> smi;
+ Handle<HeapNumber> signed32;
Handle<JSObject> object1;
Handle<JSObject> object2;
Handle<JSArray> array;
+ Handle<Type> Class(Handle<Map> map) {
+ return handle(Type::Class(map), isolate_);
+ }
+ Handle<Type> Constant(Handle<i::Object> value) {
+ return handle(Type::Constant(value, isolate_), isolate_);
+ }
Handle<Type> Union(Handle<Type> type1, Handle<Type> type2) {
return handle(Type::Union(type1, type2), isolate_);
}
@@ -192,7 +202,7 @@ class HandlifiedTypes {
TEST(Bitset) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
HandlifiedTypes T(isolate);
@@ -217,7 +227,7 @@ TEST(Bitset) {
TEST(Class) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
HandlifiedTypes T(isolate);
@@ -231,17 +241,17 @@ TEST(Class) {
TEST(Constant) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
HandlifiedTypes T(isolate);
- CHECK(IsConstant(*T.Integer31Constant));
+ CHECK(IsConstant(*T.SmiConstant));
CHECK(IsConstant(*T.ObjectConstant1));
CHECK(IsConstant(*T.ObjectConstant2));
CHECK(IsConstant(*T.ArrayConstant1));
CHECK(IsConstant(*T.ArrayConstant2));
- CHECK(*T.smi == AsConstant(*T.Integer31Constant));
+ CHECK(*T.smi == AsConstant(*T.SmiConstant));
CHECK(*T.object1 == AsConstant(*T.ObjectConstant1));
CHECK(*T.object2 == AsConstant(*T.ObjectConstant2));
CHECK(*T.object1 != AsConstant(*T.ObjectConstant2));
@@ -252,7 +262,7 @@ TEST(Constant) {
TEST(Is) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
HandlifiedTypes T(isolate);
@@ -278,12 +288,12 @@ TEST(Is) {
CheckUnordered(T.Boolean, T.Undefined);
CheckSub(T.Number, T.Any);
- CheckSub(T.Integer31, T.Number);
- CheckSub(T.Integer32, T.Number);
+ CheckSub(T.Smi, T.Number);
+ CheckSub(T.Signed32, T.Number);
CheckSub(T.Double, T.Number);
- CheckSub(T.Integer31, T.Integer32);
- CheckUnordered(T.Integer31, T.Double);
- CheckUnordered(T.Integer32, T.Double);
+ CheckSub(T.Smi, T.Signed32);
+ CheckUnordered(T.Smi, T.Double);
+ CheckUnordered(T.Signed32, T.Double);
CheckSub(T.Name, T.Any);
CheckSub(T.UniqueName, T.Any);
@@ -308,13 +318,18 @@ TEST(Is) {
CheckUnordered(T.Array, T.Function);
// Structured subtyping
+ CheckSub(T.None, T.ObjectClass);
+ CheckSub(T.None, T.ObjectConstant1);
+ CheckSub(T.ObjectClass, T.Any);
+ CheckSub(T.ObjectConstant1, T.Any);
+
CheckSub(T.ObjectClass, T.Object);
CheckSub(T.ArrayClass, T.Object);
CheckUnordered(T.ObjectClass, T.ArrayClass);
- CheckSub(T.Integer31Constant, T.Integer31);
- CheckSub(T.Integer31Constant, T.Integer32);
- CheckSub(T.Integer31Constant, T.Number);
+ CheckSub(T.SmiConstant, T.Smi);
+ CheckSub(T.SmiConstant, T.Signed32);
+ CheckSub(T.SmiConstant, T.Number);
CheckSub(T.ObjectConstant1, T.Object);
CheckSub(T.ObjectConstant2, T.Object);
CheckSub(T.ArrayConstant1, T.Object);
@@ -332,7 +347,7 @@ TEST(Is) {
TEST(Maybe) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
HandlifiedTypes T(isolate);
@@ -348,9 +363,9 @@ TEST(Maybe) {
CheckDisjoint(T.Boolean, T.Undefined);
CheckOverlap(T.Number, T.Any);
- CheckOverlap(T.Integer31, T.Number);
+ CheckOverlap(T.Smi, T.Number);
CheckOverlap(T.Double, T.Number);
- CheckDisjoint(T.Integer32, T.Double);
+ CheckDisjoint(T.Signed32, T.Double);
CheckOverlap(T.Name, T.Any);
CheckOverlap(T.UniqueName, T.Any);
@@ -374,16 +389,19 @@ TEST(Maybe) {
CheckDisjoint(T.Object, T.Proxy);
CheckDisjoint(T.Array, T.Function);
+ CheckOverlap(T.ObjectClass, T.Any);
+ CheckOverlap(T.ObjectConstant1, T.Any);
+
CheckOverlap(T.ObjectClass, T.Object);
CheckOverlap(T.ArrayClass, T.Object);
CheckOverlap(T.ObjectClass, T.ObjectClass);
CheckOverlap(T.ArrayClass, T.ArrayClass);
CheckDisjoint(T.ObjectClass, T.ArrayClass);
- CheckOverlap(T.Integer31Constant, T.Integer31);
- CheckOverlap(T.Integer31Constant, T.Integer32);
- CheckOverlap(T.Integer31Constant, T.Number);
- CheckDisjoint(T.Integer31Constant, T.Double);
+ CheckOverlap(T.SmiConstant, T.Smi);
+ CheckOverlap(T.SmiConstant, T.Signed32);
+ CheckOverlap(T.SmiConstant, T.Number);
+ CheckDisjoint(T.SmiConstant, T.Double);
CheckOverlap(T.ObjectConstant1, T.Object);
CheckOverlap(T.ObjectConstant2, T.Object);
CheckOverlap(T.ArrayConstant1, T.Object);
@@ -403,7 +421,7 @@ TEST(Maybe) {
TEST(Union) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
HandlifiedTypes T(isolate);
@@ -422,6 +440,8 @@ TEST(Union) {
CHECK(IsUnion(Type::Union(T.ObjectClass, T.ArrayClass)));
CheckEqual(T.Union(T.ObjectClass, T.ObjectClass), T.ObjectClass);
+ CheckSub(T.None, T.Union(T.ObjectClass, T.ArrayClass));
+ CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Any);
CheckSub(T.ObjectClass, T.Union(T.ObjectClass, T.ArrayClass));
CheckSub(T.ArrayClass, T.Union(T.ObjectClass, T.ArrayClass));
CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Object);
@@ -437,6 +457,8 @@ TEST(Union) {
CheckEqual(T.Union(T.ObjectConstant1, T.ObjectConstant1), T.ObjectConstant1);
CheckEqual(T.Union(T.ArrayConstant1, T.ArrayConstant1), T.ArrayConstant1);
CheckEqual(T.Union(T.ArrayConstant1, T.ArrayConstant1), T.ArrayConstant2);
+ CheckSub(T.None, T.Union(T.ObjectConstant1, T.ObjectConstant2));
+ CheckSub(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.Any);
CheckSub(T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2));
CheckSub(T.ObjectConstant2, T.Union(T.ObjectConstant1, T.ObjectConstant2));
CheckSub(T.ArrayConstant2, T.Union(T.ArrayConstant1, T.ObjectConstant2));
@@ -453,32 +475,36 @@ TEST(Union) {
CHECK(IsUnion(Type::Union(T.ObjectClass, T.Number)));
CheckEqual(T.Union(T.ObjectClass, T.Object), T.Object);
+ CheckSub(T.None, T.Union(T.ObjectClass, T.Number));
CheckSub(T.Union(T.ObjectClass, T.Number), T.Any);
- CheckSub(T.Union(T.ObjectClass, T.Integer31), T.Union(T.Object, T.Number));
+ CheckSub(T.Union(T.ObjectClass, T.Smi), T.Union(T.Object, T.Number));
CheckSub(T.Union(T.ObjectClass, T.Array), T.Object);
CheckUnordered(T.Union(T.ObjectClass, T.String), T.Array);
CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object);
CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number);
// Bitset-constant
- CHECK(IsBitset(Type::Union(T.Integer31Constant, T.Number)));
+ CHECK(IsBitset(Type::Union(T.SmiConstant, T.Number)));
CHECK(IsBitset(Type::Union(T.ObjectConstant1, T.Object)));
CHECK(IsUnion(Type::Union(T.ObjectConstant2, T.Number)));
- CheckEqual(T.Union(T.Integer31Constant, T.Number), T.Number);
+ CheckEqual(T.Union(T.SmiConstant, T.Number), T.Number);
CheckEqual(T.Union(T.ObjectConstant1, T.Object), T.Object);
+ CheckSub(T.None, T.Union(T.ObjectConstant1, T.Number));
CheckSub(T.Union(T.ObjectConstant1, T.Number), T.Any);
- CheckSub(
- T.Union(T.ObjectConstant1, T.Integer32), T.Union(T.Object, T.Number));
+ CheckSub(T.Union(T.ObjectConstant1, T.Signed32), T.Union(T.Object, T.Number));
CheckSub(T.Union(T.ObjectConstant1, T.Array), T.Object);
CheckUnordered(T.Union(T.ObjectConstant1, T.String), T.Array);
CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object);
CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number);
+ CheckEqual(T.Union(T.Signed32, T.Signed32Constant), T.Signed32);
// Class-constant
CHECK(IsUnion(Type::Union(T.ObjectConstant1, T.ObjectClass)));
CHECK(IsUnion(Type::Union(T.ArrayClass, T.ObjectConstant2)));
+ CheckSub(T.None, T.Union(T.ObjectConstant1, T.ArrayClass));
+ CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass), T.Any);
CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass), T.Object);
CheckSub(T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ArrayClass));
CheckSub(T.ArrayClass, T.Union(T.ObjectConstant1, T.ArrayClass));
@@ -508,6 +534,9 @@ TEST(Union) {
T.ObjectConstant1,
T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double));
CheckSub(
+ T.None,
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double));
+ CheckSub(
T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double),
T.Any);
CheckSub(
@@ -524,6 +553,12 @@ TEST(Union) {
T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
T.Union(T.ObjectClass, T.ObjectConstant1));
CheckSub(
+ T.None,
+ T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)));
+ CheckSub(
+ T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
+ T.Any);
+ CheckSub(
T.Union(T.ObjectClass, T.Union(T.ObjectConstant1, T.ObjectClass)),
T.Object);
CheckEqual(
@@ -547,7 +582,7 @@ TEST(Union) {
// Union-union
CHECK(IsBitset(Type::Union(
- T.Union(T.Number, T.ArrayClass), T.Union(T.Integer32, T.Array))));
+ T.Union(T.Number, T.ArrayClass), T.Union(T.Signed32, T.Array))));
CHECK(IsUnion(Type::Union(
T.Union(T.Number, T.ArrayClass), T.Union(T.ObjectClass, T.ArrayClass))));
@@ -562,14 +597,14 @@ TEST(Union) {
T.Union(T.ObjectConstant1, T.ArrayConstant2)),
T.Union(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.ArrayConstant1));
CheckEqual(
- T.Union(T.Union(T.Number, T.ArrayClass), T.Union(T.Integer31, T.Array)),
+ T.Union(T.Union(T.Number, T.ArrayClass), T.Union(T.Smi, T.Array)),
T.Union(T.Number, T.Array));
}
TEST(Intersect) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
HandlifiedTypes T(isolate);
@@ -610,12 +645,12 @@ TEST(Intersect) {
CheckEqual(T.Intersect(T.ObjectClass, T.Number), T.None);
// Bitset-constant
- CHECK(IsBitset(Type::Intersect(T.Integer31, T.Number)));
- CHECK(IsConstant(Type::Intersect(T.Integer31Constant, T.Number)));
+ CHECK(IsBitset(Type::Intersect(T.Smi, T.Number)));
+ CHECK(IsConstant(Type::Intersect(T.SmiConstant, T.Number)));
CHECK(IsConstant(Type::Intersect(T.ObjectConstant1, T.Object)));
- CheckEqual(T.Intersect(T.Integer31, T.Number), T.Integer31);
- CheckEqual(T.Intersect(T.Integer31Constant, T.Number), T.Integer31Constant);
+ CheckEqual(T.Intersect(T.Smi, T.Number), T.Smi);
+ CheckEqual(T.Intersect(T.SmiConstant, T.Number), T.SmiConstant);
CheckEqual(T.Intersect(T.ObjectConstant1, T.Object), T.ObjectConstant1);
// Class-constant
@@ -642,7 +677,7 @@ TEST(Intersect) {
CHECK(IsClass(
Type::Intersect(T.Union(T.ArrayClass, T.ObjectConstant2), T.ArrayClass)));
CHECK(IsClass(
- Type::Intersect(T.Union(T.Object, T.Integer31Constant), T.ArrayClass)));
+ Type::Intersect(T.Union(T.Object, T.SmiConstant), T.ArrayClass)));
CHECK(IsBitset(
Type::Intersect(T.Union(T.ObjectClass, T.ArrayConstant1), T.ArrayClass)));
@@ -650,7 +685,7 @@ TEST(Intersect) {
T.Intersect(T.ArrayClass, T.Union(T.ObjectConstant2, T.ArrayClass)),
T.ArrayClass);
CheckEqual(
- T.Intersect(T.ArrayClass, T.Union(T.Object, T.Integer31Constant)),
+ T.Intersect(T.ArrayClass, T.Union(T.Object, T.SmiConstant)),
T.ArrayClass);
CheckEqual(
T.Intersect(T.Union(T.ObjectClass, T.ArrayConstant1), T.ArrayClass),
@@ -660,7 +695,7 @@ TEST(Intersect) {
CHECK(IsConstant(Type::Intersect(
T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2))));
CHECK(IsConstant(Type::Intersect(
- T.Union(T.Number, T.ObjectClass), T.Integer31Constant)));
+ T.Union(T.Number, T.ObjectClass), T.SmiConstant)));
CHECK(IsBitset(Type::Intersect(
T.Union(T.ArrayConstant1, T.ObjectClass), T.ObjectConstant1)));
@@ -669,28 +704,28 @@ TEST(Intersect) {
T.ObjectConstant1, T.Union(T.ObjectConstant1, T.ObjectConstant2)),
T.ObjectConstant1);
CheckEqual(
- T.Intersect(T.Integer31Constant, T.Union(T.Number, T.ObjectConstant2)),
- T.Integer31Constant);
+ T.Intersect(T.SmiConstant, T.Union(T.Number, T.ObjectConstant2)),
+ T.SmiConstant);
CheckEqual(
T.Intersect(T.Union(T.ArrayConstant1, T.ObjectClass), T.ObjectConstant1),
T.None);
// Union-union
CHECK(IsUnion(Type::Intersect(
- T.Union(T.Number, T.ArrayClass), T.Union(T.Integer32, T.Array))));
+ T.Union(T.Number, T.ArrayClass), T.Union(T.Signed32, T.Array))));
CHECK(IsBitset(Type::Intersect(
- T.Union(T.Number, T.ObjectClass), T.Union(T.Integer32, T.Array))));
+ T.Union(T.Number, T.ObjectClass), T.Union(T.Signed32, T.Array))));
CheckEqual(
T.Intersect(
T.Union(T.Number, T.ArrayClass),
- T.Union(T.Integer31, T.Array)),
- T.Union(T.Integer31, T.ArrayClass));
+ T.Union(T.Smi, T.Array)),
+ T.Union(T.Smi, T.ArrayClass));
CheckEqual(
T.Intersect(
T.Union(T.Number, T.ObjectClass),
- T.Union(T.Integer32, T.Array)),
- T.Integer32);
+ T.Union(T.Signed32, T.Array)),
+ T.Signed32);
CheckEqual(
T.Intersect(
T.Union(T.ObjectConstant2, T.ObjectConstant1),
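[Editor's note] The Integer31/Integer32 renames above touch V8's bitset-encoded type lattice, where simple types are bit masks: union is bitwise OR, intersection is AND, and subtyping is mask inclusion. A toy model of those semantics (illustrative only, not V8's actual representation), showing the relationships CheckSub/CheckEqual assert:

#include <cstdint>

// Toy bitset lattice: each leaf type is one bit; composites are unions.
enum Bits : uint32_t {
  kNone = 0,
  kSmi = 1 << 0,
  kOtherSigned32 = 1 << 1,
  kDouble = 1 << 2,
  kSigned32 = kSmi | kOtherSigned32,
  kNumber = kSigned32 | kDouble,
};

constexpr bool Is(uint32_t t1, uint32_t t2) { return (t1 & ~t2) == 0; }  // subtype
constexpr uint32_t Union(uint32_t t1, uint32_t t2) { return t1 | t2; }
constexpr uint32_t Intersect(uint32_t t1, uint32_t t2) { return t1 & t2; }

// Mirrors the checks above: Smi <= Signed32 <= Number; Smi, Double disjoint.
static_assert(Is(kSmi, kSigned32) && Is(kSigned32, kNumber), "sub chain");
static_assert(Intersect(kSmi, kDouble) == kNone, "disjoint");
static_assert(Union(kSigned32, kDouble) == kNumber, "union is OR");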
diff --git a/deps/v8/test/cctest/test-unique.cc b/deps/v8/test/cctest/test-unique.cc
index 1d268580ed..0936908f12 100644
--- a/deps/v8/test/cctest/test-unique.cc
+++ b/deps/v8/test/cctest/test-unique.cc
@@ -36,6 +36,35 @@
using namespace v8::internal;
+#define MAKE_HANDLES_AND_DISALLOW_ALLOCATION \
+Isolate* isolate = CcTest::i_isolate(); \
+Factory* factory = isolate->factory(); \
+HandleScope sc(isolate); \
+Handle<String> handles[] = { \
+ factory->InternalizeUtf8String("A"), \
+ factory->InternalizeUtf8String("B"), \
+ factory->InternalizeUtf8String("C"), \
+ factory->InternalizeUtf8String("D"), \
+ factory->InternalizeUtf8String("E"), \
+ factory->InternalizeUtf8String("F"), \
+ factory->InternalizeUtf8String("G") \
+}; \
+DisallowHeapAllocation _disable
+
+#define MAKE_UNIQUES_A_B_C \
+ Unique<String> A(handles[0]); \
+ Unique<String> B(handles[1]); \
+ Unique<String> C(handles[2])
+
+#define MAKE_UNIQUES_A_B_C_D_E_F_G \
+ Unique<String> A(handles[0]); \
+ Unique<String> B(handles[1]); \
+ Unique<String> C(handles[2]); \
+ Unique<String> D(handles[3]); \
+ Unique<String> E(handles[4]); \
+ Unique<String> F(handles[5]); \
+ Unique<String> G(handles[6])
+
template <class T, class U>
void CheckHashCodeEqual(Unique<T> a, Unique<U> b) {
int64_t hasha = static_cast<int64_t>(a.Hashcode());
@@ -58,11 +87,9 @@ void CheckHashCodeNotEqual(Unique<T> a, Unique<U> b) {
TEST(UniqueCreate) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope sc(isolate);
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ Handle<String> A = handles[0], B = handles[1];
- Handle<String> A = factory->InternalizeUtf8String("A");
Unique<String> HA(A);
CHECK(*HA.handle() == *A);
@@ -77,7 +104,6 @@ TEST(UniqueCreate) {
CHECK(HA2 == HA);
CHECK_EQ(*HA2.handle(), *HA.handle());
- Handle<String> B = factory->InternalizeUtf8String("B");
Unique<String> HB(B);
CheckHashCodeNotEqual(HA, HB);
@@ -93,11 +119,9 @@ TEST(UniqueCreate) {
TEST(UniqueSubsume) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope sc(isolate);
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ Handle<String> A = handles[0];
- Handle<String> A = factory->InternalizeUtf8String("A");
Unique<String> HA(A);
CHECK(*HA.handle() == *A);
@@ -116,13 +140,8 @@ TEST(UniqueSubsume) {
TEST(UniqueSet_Add) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope sc(isolate);
-
- Unique<String> A(factory->InternalizeUtf8String("A"));
- Unique<String> B(factory->InternalizeUtf8String("B"));
- Unique<String> C(factory->InternalizeUtf8String("C"));
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C;
Zone zone(isolate);
@@ -146,6 +165,104 @@ TEST(UniqueSet_Add) {
}
+TEST(UniqueSet_Remove) {
+ CcTest::InitializeVM();
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C;
+
+ Zone zone(isolate);
+
+ UniqueSet<String>* set = new(&zone) UniqueSet<String>();
+
+ set->Add(A, &zone);
+ set->Add(B, &zone);
+ set->Add(C, &zone);
+ CHECK_EQ(3, set->size());
+
+ set->Remove(A);
+ CHECK_EQ(2, set->size());
+ CHECK(!set->Contains(A));
+ CHECK(set->Contains(B));
+ CHECK(set->Contains(C));
+
+ set->Remove(A);
+ CHECK_EQ(2, set->size());
+ CHECK(!set->Contains(A));
+ CHECK(set->Contains(B));
+ CHECK(set->Contains(C));
+
+ set->Remove(B);
+ CHECK_EQ(1, set->size());
+ CHECK(!set->Contains(A));
+ CHECK(!set->Contains(B));
+ CHECK(set->Contains(C));
+
+ set->Remove(C);
+ CHECK_EQ(0, set->size());
+ CHECK(!set->Contains(A));
+ CHECK(!set->Contains(B));
+ CHECK(!set->Contains(C));
+}
+
+
+TEST(UniqueSet_Contains) {
+ CcTest::InitializeVM();
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C;
+
+ Zone zone(isolate);
+
+ UniqueSet<String>* set = new(&zone) UniqueSet<String>();
+
+ CHECK_EQ(0, set->size());
+ set->Add(A, &zone);
+ CHECK(set->Contains(A));
+ CHECK(!set->Contains(B));
+ CHECK(!set->Contains(C));
+
+ set->Add(A, &zone);
+ CHECK(set->Contains(A));
+ CHECK(!set->Contains(B));
+ CHECK(!set->Contains(C));
+
+ set->Add(B, &zone);
+ CHECK(set->Contains(A));
+ CHECK(set->Contains(B));
+
+ set->Add(C, &zone);
+ CHECK(set->Contains(A));
+ CHECK(set->Contains(B));
+ CHECK(set->Contains(C));
+}
+
+
+TEST(UniqueSet_At) {
+ CcTest::InitializeVM();
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C;
+
+ Zone zone(isolate);
+
+ UniqueSet<String>* set = new(&zone) UniqueSet<String>();
+
+ CHECK_EQ(0, set->size());
+ set->Add(A, &zone);
+ CHECK(A == set->at(0));
+
+ set->Add(A, &zone);
+ CHECK(A == set->at(0));
+
+ set->Add(B, &zone);
+ CHECK(A == set->at(0) || B == set->at(0));
+ CHECK(A == set->at(1) || B == set->at(1));
+
+ set->Add(C, &zone);
+ CHECK(A == set->at(0) || B == set->at(0) || C == set->at(0));
+ CHECK(A == set->at(1) || B == set->at(1) || C == set->at(1));
+ CHECK(A == set->at(2) || B == set->at(2) || C == set->at(2));
+}
+
+
template <class T>
static void CHECK_SETS(
UniqueSet<T>* set1, UniqueSet<T>* set2, bool expected) {
@@ -158,13 +275,8 @@ static void CHECK_SETS(
TEST(UniqueSet_Equals) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope sc(isolate);
-
- Unique<String> A(factory->InternalizeUtf8String("A"));
- Unique<String> B(factory->InternalizeUtf8String("B"));
- Unique<String> C(factory->InternalizeUtf8String("C"));
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C;
Zone zone(isolate);
@@ -201,13 +313,8 @@ TEST(UniqueSet_Equals) {
TEST(UniqueSet_IsSubset1) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope sc(isolate);
-
- Unique<String> A(factory->InternalizeUtf8String("A"));
- Unique<String> B(factory->InternalizeUtf8String("B"));
- Unique<String> C(factory->InternalizeUtf8String("C"));
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C;
Zone zone(isolate);
@@ -241,17 +348,8 @@ TEST(UniqueSet_IsSubset1) {
TEST(UniqueSet_IsSubset2) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope sc(isolate);
-
- Unique<String> A(factory->InternalizeUtf8String("A"));
- Unique<String> B(factory->InternalizeUtf8String("B"));
- Unique<String> C(factory->InternalizeUtf8String("C"));
- Unique<String> D(factory->InternalizeUtf8String("D"));
- Unique<String> E(factory->InternalizeUtf8String("E"));
- Unique<String> F(factory->InternalizeUtf8String("F"));
- Unique<String> G(factory->InternalizeUtf8String("G"));
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C_D_E_F_G;
Zone zone(isolate);
@@ -293,20 +391,11 @@ TEST(UniqueSet_IsSubsetExhaustive) {
const int kSetSize = 6;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope sc(isolate);
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C_D_E_F_G;
Zone zone(isolate);
- Unique<String> A(factory->InternalizeUtf8String("A"));
- Unique<String> B(factory->InternalizeUtf8String("B"));
- Unique<String> C(factory->InternalizeUtf8String("C"));
- Unique<String> D(factory->InternalizeUtf8String("D"));
- Unique<String> E(factory->InternalizeUtf8String("E"));
- Unique<String> F(factory->InternalizeUtf8String("F"));
- Unique<String> G(factory->InternalizeUtf8String("G"));
-
Unique<String> elements[] = {
A, B, C, D, E, F, G
};
@@ -325,13 +414,8 @@ TEST(UniqueSet_IsSubsetExhaustive) {
TEST(UniqueSet_Intersect1) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope sc(isolate);
-
- Unique<String> A(factory->InternalizeUtf8String("A"));
- Unique<String> B(factory->InternalizeUtf8String("B"));
- Unique<String> C(factory->InternalizeUtf8String("C"));
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C;
Zone zone(isolate);
@@ -371,20 +455,11 @@ TEST(UniqueSet_IntersectExhaustive) {
const int kSetSize = 6;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope sc(isolate);
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C_D_E_F_G;
Zone zone(isolate);
- Unique<String> A(factory->InternalizeUtf8String("A"));
- Unique<String> B(factory->InternalizeUtf8String("B"));
- Unique<String> C(factory->InternalizeUtf8String("C"));
- Unique<String> D(factory->InternalizeUtf8String("D"));
- Unique<String> E(factory->InternalizeUtf8String("E"));
- Unique<String> F(factory->InternalizeUtf8String("F"));
- Unique<String> G(factory->InternalizeUtf8String("G"));
-
Unique<String> elements[] = {
A, B, C, D, E, F, G
};
@@ -407,13 +482,8 @@ TEST(UniqueSet_IntersectExhaustive) {
TEST(UniqueSet_Union1) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope sc(isolate);
-
- Unique<String> A(factory->InternalizeUtf8String("A"));
- Unique<String> B(factory->InternalizeUtf8String("B"));
- Unique<String> C(factory->InternalizeUtf8String("C"));
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C;
Zone zone(isolate);
@@ -453,20 +523,11 @@ TEST(UniqueSet_UnionExhaustive) {
const int kSetSize = 6;
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope sc(isolate);
+ MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
+ MAKE_UNIQUES_A_B_C_D_E_F_G;
Zone zone(isolate);
- Unique<String> A(factory->InternalizeUtf8String("A"));
- Unique<String> B(factory->InternalizeUtf8String("B"));
- Unique<String> C(factory->InternalizeUtf8String("C"));
- Unique<String> D(factory->InternalizeUtf8String("D"));
- Unique<String> E(factory->InternalizeUtf8String("E"));
- Unique<String> F(factory->InternalizeUtf8String("F"));
- Unique<String> G(factory->InternalizeUtf8String("G"));
-
Unique<String> elements[] = {
A, B, C, D, E, F, G
};
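[Editor's note] The new UniqueSet_Remove/Contains/At tests pin down set semantics over Unique<T> handles, which compare by object identity; the shared macros also install DisallowHeapAllocation so the underlying addresses stay stable while raw comparisons run. A toy sketch of the identity-set behavior being tested (std::set over pointers, not V8's zone-allocated UniqueSet):

#include <cassert>
#include <set>

// Sketch: a set keyed on object identity where re-adding an element is a
// no-op and removing an absent element is harmless, as TEST(UniqueSet_*)
// checks above.
int main() {
  int a = 0, b = 0, c = 0;  // three distinct "objects"
  std::set<const int*> set;
  set.insert(&a);
  set.insert(&a);  // duplicate add does not grow the set
  set.insert(&b);
  set.insert(&c);
  assert(set.size() == 3);
  set.erase(&a);
  set.erase(&a);   // removing twice is idempotent
  assert(set.size() == 2 && set.count(&a) == 0 && set.count(&b) == 1);
  return 0;
}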
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index feff477933..86d52fa82b 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -103,35 +103,22 @@ static const int kAreaSize = 512;
void TestMemMove(byte* area1,
byte* area2,
- byte* area3,
int src_offset,
int dest_offset,
int length) {
for (int i = 0; i < kAreaSize; i++) {
area1[i] = i & 0xFF;
area2[i] = i & 0xFF;
- area3[i] = i & 0xFF;
}
OS::MemMove(area1 + dest_offset, area1 + src_offset, length);
- MoveBytes(area2 + dest_offset, area2 + src_offset, length);
- memmove(area3 + dest_offset, area3 + src_offset, length);
- if (memcmp(area1, area3, kAreaSize) != 0) {
+ memmove(area2 + dest_offset, area2 + src_offset, length);
+ if (memcmp(area1, area2, kAreaSize) != 0) {
printf("OS::MemMove(): src_offset: %d, dest_offset: %d, length: %d\n",
src_offset, dest_offset, length);
for (int i = 0; i < kAreaSize; i++) {
- if (area1[i] == area3[i]) continue;
+ if (area1[i] == area2[i]) continue;
printf("diff at offset %d (%p): is %d, should be %d\n",
- i, reinterpret_cast<void*>(area1 + i), area1[i], area3[i]);
- }
- CHECK(false);
- }
- if (memcmp(area2, area3, kAreaSize) != 0) {
- printf("MoveBytes(): src_offset: %d, dest_offset: %d, length: %d\n",
- src_offset, dest_offset, length);
- for (int i = 0; i < kAreaSize; i++) {
- if (area2[i] == area3[i]) continue;
- printf("diff at offset %d (%p): is %d, should be %d\n",
- i, reinterpret_cast<void*>(area2 + i), area2[i], area3[i]);
+ i, reinterpret_cast<void*>(area1 + i), area1[i], area2[i]);
}
CHECK(false);
}
@@ -142,7 +129,6 @@ TEST(MemMove) {
v8::V8::Initialize();
byte* area1 = new byte[kAreaSize];
byte* area2 = new byte[kAreaSize];
- byte* area3 = new byte[kAreaSize];
static const int kMinOffset = 32;
static const int kMaxOffset = 64;
@@ -152,13 +138,12 @@ TEST(MemMove) {
for (int src_offset = kMinOffset; src_offset <= kMaxOffset; src_offset++) {
for (int dst_offset = kMinOffset; dst_offset <= kMaxOffset; dst_offset++) {
for (int length = 0; length <= kMaxLength; length++) {
- TestMemMove(area1, area2, area3, src_offset, dst_offset, length);
+ TestMemMove(area1, area2, src_offset, dst_offset, length);
}
}
}
delete[] area1;
delete[] area2;
- delete[] area3;
}
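[Editor's note] With MoveBytes gone, TestMemMove now validates OS::MemMove directly against libc memmove for every (src, dst, length) combination in a window, the standard way to catch overlap bugs in a custom mover. A condensed sketch of that harness; MyMemMove is a hypothetical stand-in for the routine under test:

#include <cassert>
#include <cstring>

static const int kAreaSize = 512;

// Hypothetical routine under test; stands in for OS::MemMove above.
void MyMemMove(void* dest, const void* src, size_t n) {
  std::memmove(dest, src, n);
}

// Exhaustively compare against memmove over overlapping ranges.
void CheckMemMoveAgrees() {
  unsigned char a[kAreaSize], b[kAreaSize];
  for (int src = 32; src <= 64; ++src) {
    for (int dst = 32; dst <= 64; ++dst) {
      for (int len = 0; len <= 128; ++len) {
        for (int i = 0; i < kAreaSize; ++i) a[i] = b[i] = i & 0xFF;
        MyMemMove(a + dst, a + src, len);
        std::memmove(b + dst, b + src, len);
        assert(std::memcmp(a, b, kAreaSize) == 0);
      }
    }
  }
}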
diff --git a/deps/v8/test/intl/OWNERS b/deps/v8/test/intl/OWNERS
new file mode 100644
index 0000000000..9d54cbbea8
--- /dev/null
+++ b/deps/v8/test/intl/OWNERS
@@ -0,0 +1,2 @@
+cira@chromium.org
+mnita@google.com
diff --git a/deps/v8/test/intl/date-format/parse-MMMdy.js b/deps/v8/test/intl/date-format/parse-MMMdy.js
index 7136527810..b23a3cde3a 100644
--- a/deps/v8/test/intl/date-format/parse-MMMdy.js
+++ b/deps/v8/test/intl/date-format/parse-MMMdy.js
@@ -30,19 +30,22 @@
var dtf = new Intl.DateTimeFormat(['en'],
{year: 'numeric', month: 'short',
- day: 'numeric'});
+ day: 'numeric',
+ timeZone: 'America/Los_Angeles'});
// Make sure we have pattern we expect (may change in the future).
assertEquals('MMM d, y', dtf.resolved.pattern);
-assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
- usePDT(String(dtf.v8Parse('May 4, 1974'))));
+var date = dtf.v8Parse('Feb 4, 1974');
+assertEquals(1974, date.getUTCFullYear());
+assertEquals(1, date.getUTCMonth());
+assertEquals(4, date.getUTCDate());
// Missing , in the pattern.
-assertEquals(undefined, dtf.v8Parse('May 4 1974'));
+assertEquals(undefined, dtf.v8Parse('Feb 4 1974'));
// Extra "th" after 4 in the pattern.
-assertEquals(undefined, dtf.v8Parse('May 4th, 1974'));
+assertEquals(undefined, dtf.v8Parse('Feb 4th, 1974'));
// Wrong pattern.
-assertEquals(undefined, dtf.v8Parse('5/4/1974'));
+assertEquals(undefined, dtf.v8Parse('2/4/1974'));
diff --git a/deps/v8/test/intl/date-format/parse-mdy.js b/deps/v8/test/intl/date-format/parse-mdy.js
index e767a0b2d2..7b1a79af86 100644
--- a/deps/v8/test/intl/date-format/parse-mdy.js
+++ b/deps/v8/test/intl/date-format/parse-mdy.js
@@ -27,23 +27,25 @@
// Testing v8Parse method for date only.
-var dtf = new Intl.DateTimeFormat(['en']);
+function checkDate(date) {
+ assertEquals(1974, date.getUTCFullYear());
+ assertEquals(1, date.getUTCMonth());
+ assertEquals(4, date.getUTCDate());
+}
+
+var dtf = new Intl.DateTimeFormat(['en'], {timeZone: 'America/Los_Angeles'});
// Make sure we have pattern we expect (may change in the future).
assertEquals('M/d/y', dtf.resolved.pattern);
-assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
- usePDT(String(dtf.v8Parse('5/4/74'))));
-assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
- usePDT(String(dtf.v8Parse('05/04/74'))));
-assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
- usePDT(String(dtf.v8Parse('5/04/74'))));
-assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
- usePDT(String(dtf.v8Parse('5/4/1974'))));
-
-// Month is numeric, so it fails on "May".
-assertEquals(undefined, dtf.v8Parse('May 4th 1974'));
+checkDate(dtf.v8Parse('2/4/74'));
+checkDate(dtf.v8Parse('02/04/74'));
+checkDate(dtf.v8Parse('2/04/74'));
+checkDate(dtf.v8Parse('02/4/74'));
+checkDate(dtf.v8Parse('2/4/1974'));
+checkDate(dtf.v8Parse('02/4/1974'));
+checkDate(dtf.v8Parse('2/04/1974'));
+checkDate(dtf.v8Parse('02/04/1974'));
-// Time is ignored from the input, since the pattern doesn't have it.
-assertEquals('Sat May 04 1974 00:00:00 GMT-0007 (PDT)',
- usePDT(String(dtf.v8Parse('5/4/74 12:30:12'))));
+// Month is numeric, so it fails on "Feb".
+assertEquals(undefined, dtf.v8Parse('Feb 4th 1974'));
diff --git a/deps/v8/test/intl/date-format/parse-mdyhms.js b/deps/v8/test/intl/date-format/parse-mdyhms.js
index 74f7467f3d..73efb62053 100644
--- a/deps/v8/test/intl/date-format/parse-mdyhms.js
+++ b/deps/v8/test/intl/date-format/parse-mdyhms.js
@@ -30,22 +30,28 @@
var dtf = new Intl.DateTimeFormat(['en'],
{year: 'numeric', month: 'numeric',
day: 'numeric', hour: 'numeric',
- minute: 'numeric', second: 'numeric'});
+ minute: 'numeric', second: 'numeric',
+ timeZone: 'UTC'});
// Make sure we have pattern we expect (may change in the future).
assertEquals('M/d/y h:mm:ss a', dtf.resolved.pattern);
-assertEquals('Sat May 04 1974 12:30:12 GMT-0007 (PDT)',
- usePDT(String(dtf.v8Parse('5/4/74 12:30:12 pm'))));
+var date = dtf.v8Parse('2/4/74 12:30:42 pm');
+assertEquals(1974, date.getUTCFullYear());
+assertEquals(1, date.getUTCMonth());
+assertEquals(4, date.getUTCDate());
+assertEquals(12, date.getUTCHours());
+assertEquals(30, date.getUTCMinutes());
+assertEquals(42, date.getUTCSeconds());
// AM/PM were not specified.
-assertEquals(undefined, dtf.v8Parse('5/4/74 12:30:12'));
+assertEquals(undefined, dtf.v8Parse('2/4/74 12:30:12'));
// Time was not specified.
-assertEquals(undefined, dtf.v8Parse('5/4/74'));
+assertEquals(undefined, dtf.v8Parse('2/4/74'));
-// Month is numeric, so it fails on "May".
-assertEquals(undefined, dtf.v8Parse('May 4th 1974'));
+// Month is numeric, so it fails on "Feb".
+assertEquals(undefined, dtf.v8Parse('Feb 4th 1974'));
// Wrong date delimiter.
-assertEquals(undefined, dtf.v8Parse('5-4-74 12:30:12 am'));
+assertEquals(undefined, dtf.v8Parse('2-4-74 12:30:12 am'));
diff --git a/deps/v8/test/intl/date-format/timezone-name.js b/deps/v8/test/intl/date-format/timezone-name.js
new file mode 100644
index 0000000000..2ed5c1acae
--- /dev/null
+++ b/deps/v8/test/intl/date-format/timezone-name.js
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests time zone names.
+
+// Winter date (PST).
+var winter = new Date(2013, 1, 12, 14, 42, 53, 0);
+
+// Summer date (PDT).
+var summer = new Date(2013, 7, 12, 14, 42, 53, 0);
+
+// Common flags for both formatters.
+var flags = {
+ year: 'numeric', month: 'long', day: 'numeric',
+ hour : '2-digit', minute : '2-digit', second : '2-digit',
+ timeZone: 'America/Los_Angeles'
+};
+
+flags.timeZoneName = "short";
+var dfs = new Intl.DateTimeFormat('en-US', flags);
+
+assertTrue(dfs.format(winter).indexOf('PST') !== -1);
+assertTrue(dfs.format(summer).indexOf('PDT') !== -1);
+
+flags.timeZoneName = "long";
+var dfl = new Intl.DateTimeFormat('en-US', flags);
+
+assertTrue(dfl.format(winter).indexOf('Pacific Standard Time') !== -1);
+assertTrue(dfl.format(summer).indexOf('Pacific Daylight Time') !== -1);
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index 34610a5d73..fc3c66b9c1 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -25,12 +25,17 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-prefix intl
+[
+[ALWAYS, {
+ # The following tests use getDefaultTimeZone().
+ 'date-format/resolved-options': [FAIL],
+ 'date-format/timezone': [FAIL],
+ 'general/v8Intl-exists': [FAIL],
-# The following tests use getDefaultTimeZone().
-date-format/resolved-options: FAIL
-date-format/timezone: FAIL
-general/v8Intl-exists: FAIL
+ # TODO(jochen): The following test is flaky.
+ 'overrides/caching': [PASS, FAIL],
-# TODO(jochen): The following test is flaky.
-overrides/caching: PASS || FAIL
+ # BUG(2899): default locale for search fails on mac and on android.
+ 'collator/default-locale': [['system == macos or arch == android_arm or arch == android_ia32', FAIL]],
+}], # ALWAYS
+]
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index 09d29d0bee..9fc087e5f5 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -57,7 +57,6 @@ class IntlTestSuite(testsuite.TestSuite):
files = []
files.append(os.path.join(self.root, "assert.js"))
files.append(os.path.join(self.root, "utils.js"))
- files.append(os.path.join(self.root, "date-format", "utils.js"))
files.append(os.path.join(self.root, testcase.path + self.suffix()))
flags += files
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 441f8edd0d..234bf0f35c 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -25,7 +25,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-prefix message
-
-# All tests in the bug directory are expected to fail.
-bugs/*: FAIL
+[
+[ALWAYS, {
+ # All tests in the bug directory are expected to fail.
+ 'bugs/*': [FAIL],
+}], # ALWAYS
+]
diff --git a/deps/v8/test/message/paren_in_arg_string.out b/deps/v8/test/message/paren_in_arg_string.out
index 3bc978b965..0ed59bab1e 100644
--- a/deps/v8/test/message/paren_in_arg_string.out
+++ b/deps/v8/test/message/paren_in_arg_string.out
@@ -2,5 +2,5 @@
var paren_in_arg_string_bad = new Function(')', 'return;');
^
SyntaxError: Function arg string contains parenthesis
- at Function (<anonymous>)
- at *%(basename)s:29:31
\ No newline at end of file
+ at Function (native)
+ at *%(basename)s:29:31
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index 411f71ceb2..e4f3f5587a 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -35,6 +35,7 @@ from testrunner.objects import testcase
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
+INVALID_FLAGS = ["--enable-slow-asserts"]
class MessageTestSuite(testsuite.TestSuite):
@@ -62,6 +63,7 @@ class MessageTestSuite(testsuite.TestSuite):
for match in flags_match:
result += match.strip().split()
result += context.mode_flags
+ result = [x for x in result if x not in INVALID_FLAGS]
result.append(os.path.join(self.root, testcase.path + ".js"))
return testcase.flags + result
diff --git a/deps/v8/test/mjsunit/allocation-site-info.js b/deps/v8/test/mjsunit/allocation-site-info.js
index 5f6817b6d3..f32344a405 100644
--- a/deps/v8/test/mjsunit/allocation-site-info.js
+++ b/deps/v8/test/mjsunit/allocation-site-info.js
@@ -35,6 +35,11 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
+// Reset the GC stress mode to be off. Needed because AllocationMementos only
+// live for one gc, so a gc that happens in certain fragile areas of the test
+// can break assumptions.
+%SetFlags("--gc-interval=-1")
+
// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
support_smi_only_arrays = true;
@@ -150,17 +155,14 @@ if (support_smi_only_arrays) {
// Verify that we will not pretransition the double->fast path.
obj = fastliteralcase(get_standard_literal(), "elliot");
assertKind(elements_kind.fast, obj);
- // This fails until we turn off optimistic transitions to the
- // most general elements kind seen on keyed stores. It's a goal
- // to turn it off, but for now we need it.
- // obj = fastliteralcase(3);
- // assertKind(elements_kind.fast_double, obj);
+ obj = fastliteralcase(get_standard_literal(), 3);
+ assertKind(elements_kind.fast, obj);
// Make sure this works in crankshafted code too.
%OptimizeFunctionOnNextCall(get_standard_literal);
get_standard_literal();
obj = get_standard_literal();
- assertKind(elements_kind.fast_double, obj);
+ assertKind(elements_kind.fast, obj);
function fastliteralcase_smifast(value) {
var literal = [1, 2, 3, 4];
@@ -231,16 +233,14 @@ if (support_smi_only_arrays) {
obj = newarraycase_length_smidouble(2);
assertKind(elements_kind.fast_double, obj);
- // Try to continue the transition to fast object, but
- // we will not pretransition from double->fast, because
- // it may hurt performance ("poisoning").
+ // Try to continue the transition to fast object. This won't work for
+ // constructed arrays because constructor dispatch is done on the
+ // elements kind, and a DOUBLE array constructor won't create an allocation
+ // memento.
obj = newarraycase_length_smidouble("coates");
assertKind(elements_kind.fast, obj);
- obj = newarraycase_length_smidouble(2.5);
- // However, because of optimistic transitions, we will
- // transition to the most general kind of elements kind found,
- // therefore I can't count on this assert yet.
- // assertKind(elements_kind.fast_double, obj);
+ obj = newarraycase_length_smidouble(2);
+ assertKind(elements_kind.fast_double, obj);
function newarraycase_length_smiobj(value) {
var a = new Array(3);
@@ -379,4 +379,114 @@ if (support_smi_only_arrays) {
instanceof_check(realmBArray);
assertUnoptimized(instanceof_check);
+
+ // Case: make sure nested arrays benefit from allocation site feedback as
+ // well.
+ (function() {
+ // Make sure we handle nested arrays
+ function get_nested_literal() {
+ var literal = [[1,2,3,4], [2], [3]];
+ return literal;
+ }
+
+ obj = get_nested_literal();
+ assertKind(elements_kind.fast, obj);
+ obj[0][0] = 3.5;
+ obj[2][0] = "hello";
+ obj = get_nested_literal();
+ assertKind(elements_kind.fast_double, obj[0]);
+ assertKind(elements_kind.fast_smi_only, obj[1]);
+ assertKind(elements_kind.fast, obj[2]);
+
+ // A more complex nested literal case.
+ function get_deep_nested_literal() {
+ var literal = [[1], [[2], "hello"], 3, [4]];
+ return literal;
+ }
+
+ obj = get_deep_nested_literal();
+ assertKind(elements_kind.fast_smi_only, obj[1][0]);
+ obj[0][0] = 3.5;
+ obj[1][0][0] = "goodbye";
+ assertKind(elements_kind.fast_double, obj[0]);
+ assertKind(elements_kind.fast, obj[1][0]);
+
+ obj = get_deep_nested_literal();
+ assertKind(elements_kind.fast_double, obj[0]);
+ assertKind(elements_kind.fast, obj[1][0]);
+ })();
+
+
+ // Make sure object literals with array fields benefit from the type feedback
+ // that allocation mementos provide.
+ (function() {
+ // A literal in an object
+ function get_object_literal() {
+ var literal = {
+ array: [1,2,3],
+ data: 3.5
+ };
+ return literal;
+ }
+
+ obj = get_object_literal();
+ assertKind(elements_kind.fast_smi_only, obj.array);
+ obj.array[1] = 3.5;
+ assertKind(elements_kind.fast_double, obj.array);
+ obj = get_object_literal();
+ assertKind(elements_kind.fast_double, obj.array);
+
+ function get_nested_object_literal() {
+ var literal = {
+ array: [[1],[2],[3]],
+ data: 3.5
+ };
+ return literal;
+ }
+
+ obj = get_nested_object_literal();
+ assertKind(elements_kind.fast, obj.array);
+ assertKind(elements_kind.fast_smi_only, obj.array[1]);
+ obj.array[1][0] = 3.5;
+ assertKind(elements_kind.fast_double, obj.array[1]);
+ obj = get_nested_object_literal();
+ assertKind(elements_kind.fast_double, obj.array[1]);
+
+ %OptimizeFunctionOnNextCall(get_nested_object_literal);
+ get_nested_object_literal();
+ obj = get_nested_object_literal();
+ assertKind(elements_kind.fast_double, obj.array[1]);
+
+ // Make sure we handle nested arrays
+ function get_nested_literal() {
+ var literal = [[1,2,3,4], [2], [3]];
+ return literal;
+ }
+
+ obj = get_nested_literal();
+ assertKind(elements_kind.fast, obj);
+ obj[0][0] = 3.5;
+ obj[2][0] = "hello";
+ obj = get_nested_literal();
+ assertKind(elements_kind.fast_double, obj[0]);
+ assertKind(elements_kind.fast_smi_only, obj[1]);
+ assertKind(elements_kind.fast, obj[2]);
+
+ // A more complex nested literal case.
+ function get_deep_nested_literal() {
+ var literal = [[1], [[2], "hello"], 3, [4]];
+ return literal;
+ }
+
+ obj = get_deep_nested_literal();
+ assertKind(elements_kind.fast_smi_only, obj[1][0]);
+ obj[0][0] = 3.5;
+ obj[1][0][0] = "goodbye";
+ assertKind(elements_kind.fast_double, obj[0]);
+ assertKind(elements_kind.fast, obj[1][0]);
+
+ obj = get_deep_nested_literal();
+ assertKind(elements_kind.fast_double, obj[0]);
+ assertKind(elements_kind.fast, obj[1][0]);
+ })();
}
diff --git a/deps/v8/test/mjsunit/array-functions-prototype-misc.js b/deps/v8/test/mjsunit/array-functions-prototype-misc.js
index 0543c323b6..74dc9a6be0 100644
--- a/deps/v8/test/mjsunit/array-functions-prototype-misc.js
+++ b/deps/v8/test/mjsunit/array-functions-prototype-misc.js
@@ -31,7 +31,7 @@
* should work on other objects too, so we test that too.
*/
-var LARGE = 40000000;
+var LARGE = 4000000;
var VERYLARGE = 4000000000;
// Nicer for firefox 1.5. Unless you uncomment the following two lines,
@@ -276,7 +276,7 @@ for (var i = 0; i < a.length; i += 1000) {
}
// Take something near the end of the array.
-for (var i = 0; i < 100; i++) {
+for (var i = 0; i < 10; i++) {
var top = a.splice(LARGE, 5);
assertEquals(5, top.length);
assertEquals(LARGE, top[0]);
diff --git a/deps/v8/test/mjsunit/array-literal-feedback.js b/deps/v8/test/mjsunit/array-literal-feedback.js
index 3378394d90..d2245c62a2 100644
--- a/deps/v8/test/mjsunit/array-literal-feedback.js
+++ b/deps/v8/test/mjsunit/array-literal-feedback.js
@@ -44,6 +44,42 @@ if (support_smi_only_arrays) {
print("Tests do NOT include smi-only arrays.");
}
+var elements_kind = {
+ fast_smi_only : 'fast smi only elements',
+ fast : 'fast elements',
+ fast_double : 'fast double elements',
+ dictionary : 'dictionary elements',
+ external_byte : 'external byte elements',
+ external_unsigned_byte : 'external unsigned byte elements',
+ external_short : 'external short elements',
+ external_unsigned_short : 'external unsigned short elements',
+ external_int : 'external int elements',
+ external_unsigned_int : 'external unsigned int elements',
+ external_float : 'external float elements',
+ external_double : 'external double elements',
+ external_pixel : 'external pixel elements'
+}
+
+function getKind(obj) {
+ if (%HasFastSmiElements(obj)) return elements_kind.fast_smi_only;
+ if (%HasFastObjectElements(obj)) return elements_kind.fast;
+ if (%HasFastDoubleElements(obj)) return elements_kind.fast_double;
+ if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
+}
+
+function isHoley(obj) {
+ if (%HasFastHoleyElements(obj)) return true;
+ return false;
+}
+
+function assertKind(expected, obj, name_opt) {
+ if (!support_smi_only_arrays &&
+ expected == elements_kind.fast_smi_only) {
+ expected = elements_kind.fast;
+ }
+ assertEquals(expected, getKind(obj), name_opt);
+}
+
if (support_smi_only_arrays) {
function get_literal(x) {
@@ -72,4 +108,19 @@ if (support_smi_only_arrays) {
b = get_literal(3);
assertTrue(%HasFastDoubleElements(b));
assertOptimized(get_literal);
+
+
+ // Test: make sure allocation site information is updated through a
+ // transition from SMI->DOUBLE->FAST
+ (function() {
+ function bar(a, b, c) {
+ return [a, b, c];
+ }
+
+ a = bar(1, 2, 3);
+ a[0] = 3.5;
+ a[1] = 'hi';
+ b = bar(1, 2, 3);
+ assertKind(elements_kind.fast, b);
+ })();
}
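
The new test above exercises the full transition chain on one allocation site. A compact sketch of the same idea, using a hypothetical twin of bar() (d8 with --allow-natives-syntax assumed for checking the kinds):

    function bar2(a, b, c) { return [a, b, c]; }
    var t = bar2(1, 2, 3);  // site starts as fast smi-only elements
    t[0] = 3.5;             // smi-only -> double
    t[1] = 'hi';            // double -> fast (tagged)
    var u = bar2(1, 2, 3);  // u starts as fast (tagged): the site was updated
                            // through the whole SMI -> DOUBLE -> FAST chain
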
diff --git a/deps/v8/test/mjsunit/big-array-literal.js b/deps/v8/test/mjsunit/big-array-literal.js
index 9f0617989c..13f91f855e 100644
--- a/deps/v8/test/mjsunit/big-array-literal.js
+++ b/deps/v8/test/mjsunit/big-array-literal.js
@@ -92,16 +92,25 @@ for (var i = 0; i < sizes.length; i++) {
testLiteral(sizes[i], true);
}
+
+function checkExpectedException(e) {
+ assertInstanceof(e, RangeError);
+ assertTrue(e.message.indexOf("Maximum call stack size exceeded") >= 0);
+}
+
+
function testLiteralAndCatch(size) {
var big_enough = false;
try {
testLiteral(size, false);
} catch (e) {
+ checkExpectedException(e);
big_enough = true;
}
try {
testLiteral(size, true);
} catch (e) {
+ checkExpectedException(e);
big_enough = true;
}
return big_enough;
diff --git a/deps/v8/test/mjsunit/big-object-literal.js b/deps/v8/test/mjsunit/big-object-literal.js
index c937f54de1..92c6ab7b7b 100644
--- a/deps/v8/test/mjsunit/big-object-literal.js
+++ b/deps/v8/test/mjsunit/big-object-literal.js
@@ -92,16 +92,25 @@ for (var i = 0; i < sizes.length; i++) {
testLiteral(sizes[i], true);
}
+
+function checkExpectedException(e) {
+ assertInstanceof(e, RangeError);
+ assertTrue(e.message.indexOf("Maximum call stack size exceeded") >= 0);
+}
+
+
function testLiteralAndCatch(size) {
var big_enough = false;
try {
testLiteral(size, false);
} catch (e) {
+ checkExpectedException(e);
big_enough = true;
}
try {
testLiteral(size, true);
} catch (e) {
+ checkExpectedException(e);
big_enough = true;
}
return big_enough;
diff --git a/deps/v8/test/mjsunit/bitwise-operations-bools.js b/deps/v8/test/mjsunit/bitwise-operations-bools.js
new file mode 100644
index 0000000000..6c7da110b0
--- /dev/null
+++ b/deps/v8/test/mjsunit/bitwise-operations-bools.js
@@ -0,0 +1,94 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test bitwise operations with booleans.
+
+var t = 1;
+
+function testFalseLeftHandSide() {
+ var b;
+ if (t) b = false;
+ assertEquals(b | 1, 1);
+ assertEquals(b & 1, 0);
+ assertEquals(b ^ 1, 1);
+ assertEquals(b << 1, 0);
+ assertEquals(b >> 1, 0);
+ assertEquals(b >>> 1, 0);
+}
+
+function testFalseRightHandSide() {
+ if (t) b = false;
+ assertEquals(1 | b, 1);
+ assertEquals(1 & b, 0);
+ assertEquals(1 ^ b, 1);
+ assertEquals(1 << b, 1);
+ assertEquals(1 >> b, 1);
+ assertEquals(1 >>> b, 1);
+}
+
+function testTrueLeftHandSide() {
+ if (t) b = true;
+ assertEquals(b | 1, 1);
+ assertEquals(b & 1, 1);
+ assertEquals(b ^ 1, 0);
+ assertEquals(b << 1, 2);
+ assertEquals(b >> 1, 0);
+ assertEquals(b >>> 1, 0);
+}
+
+function testTrueRightHandSide() {
+ if (t) b = true;
+ assertEquals(1 | b, 1);
+ assertEquals(1 & b, 1);
+ assertEquals(1 ^ b, 0);
+ assertEquals(1 << b, 2);
+ assertEquals(1 >> b, 0);
+ assertEquals(1 >>> b, 0);
+}
+
+function testBothSides() {
+ if (t) a = true;
+ if (t) b = false;
+ assertEquals(a | b, 1);
+ assertEquals(a & b, 0);
+ assertEquals(a ^ b, 1);
+ assertEquals(a << b, 1);
+ assertEquals(a >> b, 1);
+ assertEquals(a >>> b, 1);
+}
+
+
+testFalseLeftHandSide();
+testFalseRightHandSide();
+testTrueLeftHandSide();
+testTrueRightHandSide();
+testFalseLeftHandSide();
+testFalseRightHandSide();
+testTrueLeftHandSide();
+testTrueRightHandSide();
+testBothSides();
+testBothSides();
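
All the expected values in this file follow from ToInt32 coercion: true converts to 1 and false to 0 before any bitwise operator or shift count is applied. Spot checks that hold in any engine:

    console.log(false | 1);   // 1  (0 | 1)
    console.log(true << 1);   // 2  (1 << 1)
    console.log(1 >>> false); // 1  (shift count ToInt32(false) == 0)
    console.log(true ^ true); // 0  (1 ^ 1)
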
diff --git a/deps/v8/test/mjsunit/compare-known-objects.js b/deps/v8/test/mjsunit/compare-known-objects.js
new file mode 100644
index 0000000000..afffc07014
--- /dev/null
+++ b/deps/v8/test/mjsunit/compare-known-objects.js
@@ -0,0 +1,65 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test CompareIC stubs for normal and strict equality comparison of known
+// objects in slow mode. These objects share the same map even though they
+// might have completely different properties.
+
+function eq(a, b) {
+ return a == b;
+}
+
+function eq_strict(a, b) {
+ return a === b;
+}
+
+function test(a, b) {
+ // Check CompareIC for equality of known objects.
+ assertTrue(eq(a, a));
+ assertTrue(eq(b, b));
+ assertFalse(eq(a, b));
+ // Check CompareIC for strict equality of known objects.
+ assertTrue(eq_strict(a, a));
+ assertTrue(eq_strict(b, b));
+ assertFalse(eq_strict(a, b));
+}
+
+function O() {}
+O.prototype.t = function() {};
+
+var obj1 = new O;
+var obj2 = new O;
+
+// Test original objects.
+assertTrue(%HaveSameMap(obj1, obj2));
+test(obj1, obj2);
+
+// Test after adding property to first object.
+obj1.x = 1;
+test(obj1, obj2);
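
What eq and eq_strict must get right regardless of object shape: equality between distinct objects is identity, never structural, so adding properties to one of them cannot change any outcome. A plain-engine check:

    function C() {}
    var p = new C;
    var q = new C;
    p.a = 1;                        // shapes diverge...
    console.log(p == q, p === q);   // false false: still distinct identities
    console.log(p == p, p === p);   // true true
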
diff --git a/deps/v8/test/mjsunit/compare-objects.js b/deps/v8/test/mjsunit/compare-objects.js
new file mode 100644
index 0000000000..fb31203b74
--- /dev/null
+++ b/deps/v8/test/mjsunit/compare-objects.js
@@ -0,0 +1,108 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test CompareIC stubs for normal and strict equality comparison of known
+// objects in hydrogen.
+
+function lt(a, b) {
+ return a < b;
+}
+
+function gt(a, b) {
+ return a > b;
+}
+
+function eq(a, b) {
+ return a == b;
+}
+
+function eq_strict(a, b) {
+ return a === b;
+}
+
+function test(a, b, less, greater) {
+ // Check CompareIC for equality of known objects.
+ assertTrue(eq(a, a));
+ assertTrue(eq(b, b));
+ assertFalse(eq(a, b));
+ assertTrue(eq_strict(a, a));
+ assertTrue(eq_strict(b, b));
+ assertFalse(eq_strict(a, b));
+ assertEquals(lt(a, b), less);
+ assertEquals(gt(a, b), greater);
+ assertEquals(lt(b, a), greater);
+ assertEquals(gt(b, a), less);
+}
+
+var obj1 = {toString: function() {return "1";}};
+var obj2 = {toString: function() {return "2";}};
+
+var less = obj1 < obj2;
+var greater = obj1 > obj2;
+
+test(obj1, obj2, less, greater);
+test(obj1, obj2, less, greater);
+test(obj1, obj2, less, greater);
+%OptimizeFunctionOnNextCall(test);
+test(obj1, obj2, less, greater);
+test(obj1, obj2, less, greater);
+
+obj1.x = 1;
+test(obj1, obj2, less, greater);
+
+obj2.y = 2;
+test(obj1, obj2, less, greater);
+
+var obj1 = {test: 3};
+var obj2 = {test2: 3};
+
+var less = obj1 < obj2;
+var greater = obj1 > obj2;
+
+test(obj1, obj2, less, greater);
+test(obj1, obj2, less, greater);
+test(obj1, obj2, less, greater);
+%OptimizeFunctionOnNextCall(test);
+test(obj1, obj2, less, greater);
+test(obj1, obj2, less, greater);
+
+obj1.toString = function() {return "1"};
+var less = obj1 < obj2;
+var greater = obj1 > obj2;
+test(obj1, obj2, less, greater);
+%OptimizeFunctionOnNextCall(test);
+test(obj1, obj2, less, greater);
+
+obj2.toString = function() {return "2"};
+var less = true;
+var greater = false;
+
+test(obj1, obj2, less, greater);
+obj2.y = 2;
+test(obj1, obj2, less, greater);
diff --git a/deps/v8/test/mjsunit/parallel-invalidate-transition-map.js b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
index 9a5d31003f..699534f665 100644
--- a/deps/v8/test/mjsunit/parallel-invalidate-transition-map.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --track-fields --track-double-fields --allow-natives-syntax
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=100
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
@@ -49,9 +49,13 @@ add_field(new_object());
%OptimizeFunctionOnNextCall(add_field, "concurrent");
var o = new_object();
-// Trigger optimization in the background thread.
+// Kick off recompilation.
add_field(o);
-// Invalidate transition map while optimization is underway.
+// Invalidate transition map after compile graph has been created.
o.c = 2.2;
+// In the meantime, concurrent recompilation is still blocked.
+assertUnoptimized(add_field, "no sync");
+// Let concurrent recompilation proceed.
+%UnblockConcurrentRecompilation();
// Sync with background thread to conclude optimization that bailed out.
assertUnoptimized(add_field, "sync");
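
The delay-based flag this hunk removes made the interleaving timing-dependent; --block-concurrent-recompilation pins it down. The shared pattern, sketched for any function f under test (d8 with the mjsunit harness and --allow-natives-syntax --concurrent-recompilation --block-concurrent-recompilation assumed; invalidateAssumption is a hypothetical stand-in):

    %OptimizeFunctionOnNextCall(f, "concurrent");
    f();                                // queue the compile job; the
                                        // background thread stays blocked
    invalidateAssumption();             // break a map or prototype the
                                        // compile graph depends on
    assertUnoptimized(f, "no sync");    // provably not optimized yet
    %UnblockConcurrentRecompilation();  // release the background thread
    assertUnoptimized(f, "sync");       // sync and observe the bailout
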
diff --git a/deps/v8/test/mjsunit/compiler/parallel-proto-change.js b/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
index 7602279893..e126465a95 100644
--- a/deps/v8/test/mjsunit/compiler/parallel-proto-change.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=50
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
@@ -43,12 +43,14 @@ assertEquals(1, f(o));
// Mark for concurrent optimization.
%OptimizeFunctionOnNextCall(f, "concurrent");
-// Trigger optimization in the background thread.
+// Kick off recompilation.
assertEquals(1, f(o));
-// While concurrent recompilation is running, optimization not yet done.
-assertUnoptimized(f, "no sync");
-// Change the prototype chain during optimization to trigger map invalidation.
+// Change the prototype chain after compile graph has been created.
o.__proto__.__proto__ = { bar: function() { return 2; } };
+// At this point, concurrent recompilation thread has not yet done its job.
+assertUnoptimized(f, "no sync");
+// Let the background thread proceed.
+%UnblockConcurrentRecompilation();
// Optimization eventually bails out due to map dependency.
assertUnoptimized(f, "sync");
assertEquals(2, f(o));
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-representation.js b/deps/v8/test/mjsunit/compiler/escape-analysis-representation.js
new file mode 100644
index 0000000000..8e21a36b40
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-representation.js
@@ -0,0 +1,73 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --use-escape-analysis --max-opt-count=100
+
+// This tests that captured objects materialized through the deoptimizer
+// have field descriptors with a representation matching the values that
+// have actually been stored in the object.
+
+var values = [ function() { return {}; },
+ function() { return 23; },
+ function() { return 4.2; } ];
+
+function constructor(value_track) {
+ this.x = value_track();
+}
+
+function access(value_track, value_break, deopt) {
+ var o = new constructor(value_track);
+ o.x = value_break;
+ deopt.deopt
+ assertEquals(value_break, o.x);
+}
+
+function test(value_track, value_break) {
+ var deopt = { deopt:false };
+
+ // Warm-up field tracking to a certain representation.
+ access(value_track, value_track(), deopt);
+ access(value_track, value_track(), deopt);
+ %OptimizeFunctionOnNextCall(access);
+ access(value_track, value_track(), deopt);
+
+ // Deoptimize on a run with a different representation.
+ delete deopt.deopt;
+ access(value_track, value_break(), deopt);
+
+ // Clear type feedback of the access function for next run.
+ %ClearFunctionTypeFeedback(access);
+
+ // Also make sure the initial map of the constructor is reset.
+ constructor.prototype = {};
+}
+
+for (var i = 0; i < values.length; i++) {
+ for (var j = 0; j < values.length; j++) {
+ test(values[i], values[j])
+ }
+}
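
The three value generators walk the per-field representation lattice V8 tracks: tagged heap object, smi, and double. An object that was scalar-replaced by escape analysis must be rebuilt on deopt with fields matching the trained representation. A self-contained shape of one warm-up/break cycle (hypothetical names; the %-call, shown commented, assumes d8 with --allow-natives-syntax):

    function Box(v) { this.x = v; }
    function read(v) { var o = new Box(v); return o.x; }
    read(23); read(23);  // trains Box's x field as smi
    // %OptimizeFunctionOnNextCall(read);
    read(23);            // optimized code may scalar-replace o entirely
    read(4.2);           // double store: the representation changes, the code
                         // deopts, and o is rematerialized with a typed field
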
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis.js b/deps/v8/test/mjsunit/compiler/escape-analysis.js
index 74e638a538..dccc476925 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis.js
@@ -271,3 +271,73 @@
%OptimizeFunctionOnNextCall(oob);
assertEquals(7, oob(cons2, true));
})();
+
+
+// Test non-shallow nested graph of captured objects.
+(function testDeep() {
+ var deopt = { deopt:false };
+ function constructor1() {
+ this.x = 23;
+ }
+ function constructor2(nested) {
+ this.a = 17;
+ this.b = nested;
+ this.c = 42;
+ }
+ function deep() {
+ var o1 = new constructor1();
+ var o2 = new constructor2(o1);
+ assertEquals(17, o2.a);
+ assertEquals(23, o2.b.x);
+ assertEquals(42, o2.c);
+ o1.x = 99;
+ deopt.deopt;
+ assertEquals(99, o1.x);
+ assertEquals(99, o2.b.x);
+ }
+ deep(); deep();
+ %OptimizeFunctionOnNextCall(deep);
+ deep(); deep();
+ delete deopt.deopt;
+ deep(); deep();
+})();
+
+
+// Test materialization of a field that requires a Smi value.
+(function testSmiField() {
+ var deopt = { deopt:false };
+ function constructor() {
+ this.x = 1;
+ }
+ function field(x) {
+ var o = new constructor();
+ o.x = x;
+ deopt.deopt
+ assertEquals(x, o.x);
+ }
+ field(1); field(2);
+ %OptimizeFunctionOnNextCall(field);
+ field(3); field(4);
+ delete deopt.deopt;
+ field(5.5); field(6.5);
+})();
+
+
+// Test materialization of a field that requires a heap object value.
+(function testHeapObjectField() {
+ var deopt = { deopt:false };
+ function constructor() {
+ this.x = {};
+ }
+ function field(x) {
+ var o = new constructor();
+ o.x = x;
+ deopt.deopt
+ assertEquals(x, o.x);
+ }
+ field({}); field({});
+ %OptimizeFunctionOnNextCall(field);
+ field({}); field({});
+ delete deopt.deopt;
+ field(1); field(2);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/expression-trees.js b/deps/v8/test/mjsunit/compiler/expression-trees.js
index fac6b4cb65..0d971a95b2 100644
--- a/deps/v8/test/mjsunit/compiler/expression-trees.js
+++ b/deps/v8/test/mjsunit/compiler/expression-trees.js
@@ -55,46 +55,43 @@ function makeTrees(op, leaves) {
}
}
-// All 429 possible bitwise OR trees with eight leaves.
-var identifiers = ['a','b','c','d','e','f','g','h'];
+// All possible bitwise OR trees with six leaves, i.e. CatalanNumber[5] = 42,
+// see http://mathworld.wolfram.com/CatalanNumber.html.
+var identifiers = ['a','b','c','d','e','f'];
var or_trees = makeTrees("|", identifiers);
var and_trees = makeTrees("&", identifiers);
-// Set up leaf masks to set 8 least-significant bits.
+// Set up leaf masks to set 6 least-significant bits.
var a = 1 << 0;
var b = 1 << 1;
var c = 1 << 2;
var d = 1 << 3;
var e = 1 << 4;
var f = 1 << 5;
-var g = 1 << 6;
-var h = 1 << 7;
for (var i = 0; i < or_trees.length; ++i) {
- for (var j = 0; j < 8; ++j) {
+ for (var j = 0; j < 6; ++j) {
var or_fun = new Function("return " + or_trees[i]);
- if (j == 0) assertEquals(255, or_fun());
+ if (j == 0) assertEquals(63, or_fun());
// Set the j'th variable to a string to force a bailout.
eval(identifiers[j] + "+= ''");
- assertEquals(255, or_fun());
+ assertEquals(63, or_fun());
// Set it back to a number for the next iteration.
eval(identifiers[j] + "= +" + identifiers[j]);
}
}
-// Set up leaf masks to clear 8 least-significant bits.
-a ^= 255;
-b ^= 255;
-c ^= 255;
-d ^= 255;
-e ^= 255;
-f ^= 255;
-g ^= 255;
-h ^= 255;
+// Set up leaf masks to clear 6 least-significant bits.
+a ^= 63;
+b ^= 63;
+c ^= 63;
+d ^= 63;
+e ^= 63;
+f ^= 63;
for (i = 0; i < and_trees.length; ++i) {
- for (var j = 0; j < 8; ++j) {
+ for (var j = 0; j < 6; ++j) {
var and_fun = new Function("return " + and_trees[i]);
if (j == 0) assertEquals(0, and_fun());
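
The counts quoted in the comment: the number of binary trees over n+1 leaves is the Catalan number C_n = (2n choose n)/(n+1), so six leaves give C_5 = 252/6 = 42 and the old eight-leaf version had C_7 = 429. A quick check via the recurrence C_{k+1} = C_k * 2(2k+1)/(k+2):

    function catalan(n) {
      var c = 1;  // C_0
      for (var k = 0; k < n; k++) c = c * 2 * (2 * k + 1) / (k + 2);
      return c;
    }
    console.log(catalan(5), catalan(7));  // 42 429
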
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-global.js b/deps/v8/test/mjsunit/compiler/load-elimination-global.js
new file mode 100644
index 0000000000..9caaa9f718
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-global.js
@@ -0,0 +1,196 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --load-elimination
+
+// Test global load elimination of redundant loads and stores.
+
+var X = true; // For forcing branches.
+X = false;
+X = true;
+X = false;
+
+function B(x, y) {
+ this.x = x;
+ this.y = y;
+ return this;
+}
+
+function test_load() {
+ var a = new B(1, 2);
+ var f = a.x + a.x;
+ if (false) ;
+ return f + a.x + a.x;
+}
+
+function test_load2() {
+ var a = new B(1, 2);
+ var f = a.x + a.x;
+ if (true) ;
+ return f + a.x + a.x;
+}
+
+function test_store_load() {
+ var a = new B(1, 2);
+ a.x = 4;
+ var b = X ? a.x : a.x;
+ return b + a.x;
+}
+
+function test_store_load2() {
+ var a = new B(1, 2);
+ var c = 6;
+ if (X) a.x = c;
+ else a.x = c;
+ return a.x + a.x;
+}
+
+function test_nonaliasing_store1() {
+ var a = new B(2, 3), b = new B(3, 4);
+ if (X) ;
+ b.x = 4;
+ if (X) ;
+ var f = a.x;
+ if (X) ;
+ b.x = 5;
+ if (X) ;
+ var g = a.x;
+ if (X) ;
+ b.x = 6;
+ if (X) ;
+ var h = a.x;
+ if (X) ;
+ b.x = 7;
+ if (X) ;
+ return f + g + h + a.x;
+}
+
+function test_loop(x) {
+ var a = new B(2, 3);
+ var v = a.x;
+ var total = v;
+ var i = 0;
+ while (i++ < 10) {
+ total = a.x;
+ a.y = 4;
+ }
+ return total;
+}
+
+function test_loop2(x) {
+ var a = new B(2, 3);
+ var v = a.x;
+ var total = v;
+ var i = 0;
+ while (i++ < 10) {
+ total = a.x; // a.x not affected by loop
+ a.y = 4;
+
+ var j = 0;
+ while (j++ < 10) {
+ total = a.x; // a.x not affected by loop
+ a.y = 5;
+ }
+
+ total = a.x;
+ a.y = 6;
+
+ j = 0;
+ while (j++ < 10) {
+ total = a.x; // a.x not affected by loop
+ a.y = 7;
+ }
+ }
+ return total;
+}
+
+function killall() {
+ try { } catch(e) { }
+}
+
+%NeverOptimizeFunction(killall);
+
+function test_store_load_kill() {
+ var a = new B(1, 2);
+ if (X) ;
+ a.x = 4;
+ if (X) ;
+ var f = a.x;
+ if (X) ;
+ a.x = 5;
+ if (X) ;
+ var g = a.x;
+ if (X) ;
+ killall();
+ if (X) ;
+ a.x = 6;
+ if (X) ;
+ var h = a.x;
+ if (X) ;
+ a.x = 7;
+ if (X) ;
+ return f + g + h + a.x;
+}
+
+function test_store_store() {
+ var a = new B(6, 7);
+ if (X) ;
+ a.x = 7;
+ if (X) ;
+ a.x = 7;
+ if (X) ;
+ a.x = 7;
+ if (X) ;
+ a.x = 7;
+ if (X) ;
+ return a.x;
+}
+
+function test(x, f) {
+ X = true;
+ assertEquals(x, f());
+ assertEquals(x, f());
+ X = false;
+ assertEquals(x, f());
+ assertEquals(x, f());
+ X = true;
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(x, f());
+ assertEquals(x, f());
+ X = false;
+ assertEquals(x, f());
+ assertEquals(x, f());
+}
+
+test(4, test_load);
+test(8, test_store_load);
+test(12, test_store_load2);
+test(8, test_nonaliasing_store1);
+test(22, test_store_load_kill);
+test(7, test_store_store);
+test(2, test_loop);
+test(2, test_loop2);
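
A distilled, self-contained version of what test_store_load_kill pins down (names here are illustrative): a store forwards to later loads only across code the compiler can see through, and a call with unknown side effects clears that memory.

    function P(x) { this.x = x; }
    function unknown() { try { } catch (e) { } }  // opaque call boundary
    function distilled() {
      var a = new P(1);
      a.x = 4;
      var f = a.x;     // load elimination forwards the stored 4
      unknown();       // unknown effects: remembered field values are dropped
      a.x = 6;
      return f + a.x;  // the second load still folds to 6 within its block
    }
    console.log(distilled());  // 10
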
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-osr.js b/deps/v8/test/mjsunit/compiler/load-elimination-osr.js
new file mode 100644
index 0000000000..a57fe173ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-osr.js
@@ -0,0 +1,65 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --load-elimination
+
+// Test global load elimination in the presence of OSR compilation.
+
+function It(x, y) { }
+
+function foo_osr(x, limit) {
+ var o = new It();
+ o.x = x;
+ o.y = x;
+ for (var i = 0; i < limit; i++) {
+ o.y += o.x; // Load of x cannot be hoisted due to OSR.
+ }
+
+ return o.y;
+}
+
+assertEquals(22, foo_osr(11, 1));
+assertEquals(24, foo_osr(12, 1));
+assertEquals(1300013, foo_osr(13, 100000));
+
+
+function foo_hot(x, limit) {
+ var o = new It();
+ o.x = x;
+ o.y = x;
+ for (var i = 0; i < limit; i++) {
+ o.y += o.x; // Load of x can be hoisted without OSR.
+ }
+
+ return o.y;
+}
+
+assertEquals(22, foo_hot(11, 1));
+assertEquals(24, foo_hot(12, 1));
+%OptimizeFunctionOnNextCall(foo_hot);
+assertEquals(32, foo_hot(16, 1));
+assertEquals(1300013, foo_hot(13, 100000));
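
The asserted constants follow from the loop shape: o.y starts at x and gains o.x == x on each of the limit iterations, so the result is x * (limit + 1):

    console.log(13 * (100000 + 1));  // 1300013, matching both runs above
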
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination.js b/deps/v8/test/mjsunit/compiler/load-elimination.js
new file mode 100644
index 0000000000..e019508c65
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/load-elimination.js
@@ -0,0 +1,106 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --load-elimination
+
+// Test local load elimination of redundant loads and stores.
+
+function B(x, y) {
+ this.x = x;
+ this.y = y;
+ return this;
+}
+
+function test_load() {
+ var a = new B(1, 2);
+ return a.x + a.x + a.x + a.x;
+}
+
+function test_store_load() {
+ var a = new B(1, 2);
+ a.x = 4;
+ var f = a.x;
+ a.x = 5;
+ var g = a.x;
+ a.x = 6;
+ var h = a.x;
+ a.x = 7;
+ return f + g + h + a.x;
+}
+
+function test_nonaliasing_store1() {
+ var a = new B(2, 3), b = new B(3, 4);
+ b.x = 4;
+ var f = a.x;
+ b.x = 5;
+ var g = a.x;
+ b.x = 6;
+ var h = a.x;
+ b.x = 7;
+ return f + g + h + a.x;
+}
+
+function killall() {
+ try { } catch(e) { }
+}
+
+%NeverOptimizeFunction(killall);
+
+function test_store_load_kill() {
+ var a = new B(1, 2);
+ a.x = 4;
+ var f = a.x;
+ a.x = 5;
+ var g = a.x;
+ killall();
+ a.x = 6;
+ var h = a.x;
+ a.x = 7;
+ return f + g + h + a.x;
+}
+
+function test_store_store() {
+ var a = new B(6, 7);
+ a.x = 7;
+ a.x = 7;
+ a.x = 7;
+ a.x = 7;
+ return a.x;
+}
+
+function test(x, f) {
+ assertEquals(x, f());
+ assertEquals(x, f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(x, f());
+}
+
+test(4, test_load);
+test(22, test_store_load);
+test(8, test_nonaliasing_store1);
+test(22, test_store_load_kill);
+test(7, test_store_store);
diff --git a/deps/v8/test/mjsunit/manual-parallel-recompile.js b/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
index 0a0e61d524..b2b63988ba 100644
--- a/deps/v8/test/mjsunit/manual-parallel-recompile.js
+++ b/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=50
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
@@ -55,10 +55,13 @@ assertUnoptimized(g);
%OptimizeFunctionOnNextCall(f, "concurrent");
%OptimizeFunctionOnNextCall(g, "concurrent");
-f(g(2)); // Trigger optimization.
+f(g(2)); // Kick off recompilation.
-assertUnoptimized(f, "no sync"); // Not yet optimized while background thread
-assertUnoptimized(g, "no sync"); // is running.
+assertUnoptimized(f, "no sync"); // Not yet optimized since recompilation
+assertUnoptimized(g, "no sync"); // is still blocked.
+
+// Let concurrent recompilation proceed.
+%UnblockConcurrentRecompilation();
assertOptimized(f, "sync"); // Optimized once we sync with the
assertOptimized(g, "sync"); // background thread.
diff --git a/deps/v8/test/mjsunit/compiler/osr-alignment.js b/deps/v8/test/mjsunit/compiler/osr-alignment.js
new file mode 100644
index 0000000000..30d72d0614
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-alignment.js
@@ -0,0 +1,86 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --use-osr
+
+function f1() {
+ var sum = 0;
+ for (var i = 0; i < 1000000; i++) {
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ }
+ return sum;
+}
+
+function f2() {
+ var sum = 0;
+ for (var i = 0; i < 1000000; i++) {
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ }
+ return sum;
+}
+
+function f3() {
+ var sum = 0;
+ for (var i = 0; i < 1000000; i++) {
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ }
+ return sum;
+}
+
+function test1() {
+ var j = 11;
+ for (var i = 0; i < 2; i++) {
+ assertEquals(500009500000, f1());
+ }
+}
+
+function test2() {
+ for (var i = 0; i < 2; i++) {
+ var j = 11, k = 12;
+ assertEquals(500009500000, f2());
+ }
+}
+
+function test3() {
+ for (var i = 0; i < 2; i++) {
+ var j = 11, k = 13, m = 14;
+ assertEquals(500009500000, f3());
+ }
+}
+
+test1();
+test2();
+test3();
diff --git a/deps/v8/test/mjsunit/compiler/rotate.js b/deps/v8/test/mjsunit/compiler/rotate.js
index 14fe9da3e6..2f4bc5a967 100644
--- a/deps/v8/test/mjsunit/compiler/rotate.js
+++ b/deps/v8/test/mjsunit/compiler/rotate.js
@@ -222,3 +222,89 @@ for (var i = 0; i <= 100; i++) {
assertEquals(1 << ((i % 32)), ROR4(1, i));
}
+//---------------------------------------------------------
+// Test cases for constant shift amounts
+//---------------------------------------------------------
+// constant operand: 20
+function ROR1_sa20(x) {
+ return (x >>> 20) | (x << 12);
+}
+
+function ROR2_sa20(x) {
+ return (x >>> 12) | (x << 20);
+}
+
+function ROR3_sa20(x, sa) {
+ return (x << 12) | (x >>> 20);
+}
+
+function ROR4_sa20(x) {
+ return (x << 20) | (x >>> 12);
+}
+
+// constant operand: 40
+function ROR1_sa40(x) {
+ return (x >>> 40) | (x << -8);
+}
+
+function ROR2_sa40(x) {
+ return (x >>> -8) | (x << 40);
+}
+
+function ROR3_sa40(x, sa) {
+ return (x << -8) | (x >>> 40);
+}
+
+function ROR4_sa40(x) {
+ return (x << 40) | (x >>> -8);
+}
+
+// ROR1_sa20
+assertEquals(ROR1(0x0000FFFF, 20), ROR1_sa20(0x0000FFFF));
+assertEquals(ROR1(0x0000FFFF, 20), ROR1_sa20(0x0000FFFF));
+%OptimizeFunctionOnNextCall(ROR1_sa20);
+assertEquals(ROR1(0x0000FFFF, 20), ROR1_sa20(0x0000FFFF));
+
+// ROR1_sa40
+assertEquals(ROR1(0x0000FFFF, 40), ROR1_sa40(0x0000FFFF));
+assertEquals(ROR1(0x0000FFFF, 40), ROR1_sa40(0x0000FFFF));
+%OptimizeFunctionOnNextCall(ROR1_sa40);
+assertEquals(ROR1(0x0000FFFF, 40), ROR1_sa40(0x0000FFFF));
+
+// ROR2_sa20
+assertEquals(ROR2(0xFFFFFFFF, 20), ROR2_sa20(0xFFFFFFFF));
+assertEquals(ROR2(0xFFFFFFFF, 20), ROR2_sa20(0xFFFFFFFF));
+%OptimizeFunctionOnNextCall(ROR2_sa20);
+assertEquals(ROR2(0xFFFFFFFF, 20), ROR2_sa20(0xFFFFFFFF));
+
+// ROR2_sa40
+assertEquals(ROR2(0x0000FFFF, 40), ROR2_sa40(0x0000FFFF));
+assertEquals(ROR2(0x0000FFFF, 40), ROR2_sa40(0x0000FFFF));
+%OptimizeFunctionOnNextCall(ROR2_sa40);
+assertEquals(ROR2(0x0000FFFF, 40), ROR2_sa40(0x0000FFFF));
+
+// ROR3_sa20
+assertEquals(ROR3(0x0000FFFF, 20), ROR3_sa20(0x0000FFFF));
+assertEquals(ROR3(0x0000FFFF, 20), ROR3_sa20(0x0000FFFF));
+%OptimizeFunctionOnNextCall(ROR3_sa20);
+assertEquals(ROR3(0x0000FFFF, 20), ROR3_sa20(0x0000FFFF));
+
+// ROR3_sa40
+assertEquals(ROR3(0x0000FFFF, 40), ROR3_sa40(0x0000FFFF));
+assertEquals(ROR3(0x0000FFFF, 40), ROR3_sa40(0x0000FFFF));
+%OptimizeFunctionOnNextCall(ROR3_sa40);
+assertEquals(ROR3(0x0000FFFF, 40), ROR3_sa40(0x0000FFFF));
+
+// ROR4_sa20
+assertEquals(ROR4(0x0000FFFF, 20), ROR4_sa20(0x0000FFFF));
+assertEquals(ROR4(0x0000FFFF, 20), ROR4_sa20(0x0000FFFF));
+%OptimizeFunctionOnNextCall(ROR4_sa20);
+assertEquals(ROR4(0x0000FFFF, 20), ROR4_sa20(0x0000FFFF));
+
+// ROR4_sa40
+assertEquals(ROR4(0xFFFFFFFF, 40), ROR4_sa40(0xFFFFFFFF));
+assertEquals(ROR4(0xFFFFFFFF, 40), ROR4_sa40(0xFFFFFFFF));
+%OptimizeFunctionOnNextCall(ROR4_sa40);
+assertEquals(ROR4(0xFFFFFFFF, 40), ROR4_sa40(0xFFFFFFFF));
+
+
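
The sa40 variants are still valid 32-bit rotations because JavaScript reduces shift counts modulo 32: 40 masks to 8 and -8 masks to 24, and 8 + 24 == 32. Both spellings below are the same rotation by 8, verifiable in any engine:

    function ror8(x)  { return ((x >>> 8)  | (x << -8)) >>> 0; }
    function ror40(x) { return ((x >>> 40) | (x << 24)) >>> 0; }
    console.log(ror8(0x0000FFFF).toString(16));   // ff0000ff
    console.log(ror40(0x0000FFFF).toString(16));  // ff0000ff
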
diff --git a/deps/v8/test/mjsunit/parallel-initial-prototype-change.js b/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js
index 625b590fcc..d5b1b99491 100644
--- a/deps/v8/test/mjsunit/parallel-initial-prototype-change.js
+++ b/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=100
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
@@ -43,12 +43,15 @@ assertEquals(0.5, f1(arr, 0));
// Optimized code of f1 depends on initial object and array maps.
%OptimizeFunctionOnNextCall(f1, "concurrent");
-// Trigger optimization in the background thread
+// Kick off recompilation.
assertEquals(0.5, f1(arr, 0));
-Object.prototype[1] = 1.5; // Invalidate current initial object map.
+// Invalidate current initial object map after compile graph has been created.
+Object.prototype[1] = 1.5;
assertEquals(2, f1(arr, 1));
-// Not yet optimized while background thread is running.
+// Not yet optimized since concurrent recompilation is blocked.
assertUnoptimized(f1, "no sync");
+// Let concurrent recompilation proceed.
+%UnblockConcurrentRecompilation();
// Sync with background thread to conclude optimization, which bails out
// due to map dependency.
assertUnoptimized(f1, "sync");
diff --git a/deps/v8/test/mjsunit/d8-performance-now.js b/deps/v8/test/mjsunit/d8-performance-now.js
new file mode 100644
index 0000000000..13eb1d3f00
--- /dev/null
+++ b/deps/v8/test/mjsunit/d8-performance-now.js
@@ -0,0 +1,62 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test the performance.now() function of d8. This test only makes sense with
+// d8.
+
+// Don't run this test in gc stress mode. Time differences may be long
+// due to garbage collections.
+%SetFlags("--gc-interval=-1");
+%SetFlags("--nostress-compaction");
+
+if (this.performance && performance.now) {
+ (function run() {
+ var start_test = performance.now();
+ // Let the retry run for at most 100ms to reduce flakiness.
+ for (var start = performance.now();
+ start - start_test < 100;
+ start = performance.now()) {
+ var end = performance.now();
+ assertTrue(start >= start_test);
+ assertTrue(end >= start);
+ while (end - start == 0) {
+ var next = performance.now();
+ assertTrue(next >= end);
+ end = next;
+ }
+ if (end - start <= 1) {
+ // Found (sub-)millisecond granularity.
+ return;
+ } else {
+ print("Timer difference too big: " + (end - start) + "ms");
+ }
+ }
+ assertTrue(false);
+ })()
+}
diff --git a/deps/v8/test/mjsunit/debug-liveedit-4.js b/deps/v8/test/mjsunit/debug-liveedit-4.js
new file mode 100644
index 0000000000..38f751440a
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-liveedit-4.js
@@ -0,0 +1,69 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+
+// In this test case we edit a script so that technically the function text
+// hasn't been changed. However, the function actually becomes one level more
+// nested and must be recompiled because it uses a variable from the outer scope.
+
+
+Debug = debug.Debug
+
+eval(
+"function TestFunction() {\n"
++ " var a = 'a';\n"
++ " var b = 'b';\n"
++ " var c = 'c';\n"
++ " function A() {\n"
++ " return 2013;\n"
++ " }\n"
++ " function B() {\n"
++ " return String([a, c]);\n"
++ " }\n"
++ " return B();\n"
++ "}\n"
+);
+
+var res = TestFunction();
+print(res);
+assertEquals('a,c', res);
+
+var script = Debug.findScript(TestFunction);
+var new_source = script.source.replace("2013", "b");
+print("new source: " + new_source);
+var change_log = new Array();
+var result = Debug.LiveEdit.SetScriptSource(script, new_source, false, change_log);
+
+print("Result: " + JSON.stringify(result) + "\n");
+print("Change log: " + JSON.stringify(change_log) + "\n");
+
+var res = TestFunction();
+print(res);
+// Without the bug fix, this might be 'a,b'.
+assertEquals('a,c', res);
diff --git a/deps/v8/test/mjsunit/debug-stepin-function-call.js b/deps/v8/test/mjsunit/debug-stepin-function-call.js
index 3b5240c933..eaeebcedb2 100644
--- a/deps/v8/test/mjsunit/debug-stepin-function-call.js
+++ b/deps/v8/test/mjsunit/debug-stepin-function-call.js
@@ -142,8 +142,19 @@ function bind1() {
bound();
}
+// Test step into apply of bound function.
+function applyAndBind1() {
+ var bound = g.bind(null, 3);
+ debugger;
+ bound.apply(null, [3]);
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+}
+
var testFunctions =
- [call1, call2, call3, call4, apply1, apply2, apply3, apply4, bind1];
+ [call1, call2, call3, call4, apply1, apply2, apply3, apply4, bind1,
+ applyAndBind1];
for (var i = 0; i < testFunctions.length; i++) {
state = 0;
@@ -161,4 +172,4 @@ assertNull(exception);
assertEquals(3, state);
// Get rid of the debug event listener.
-Debug.setListener(null); \ No newline at end of file
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/div-mul-minus-one.js b/deps/v8/test/mjsunit/div-mul-minus-one.js
new file mode 100644
index 0000000000..f05bf0f54c
--- /dev/null
+++ b/deps/v8/test/mjsunit/div-mul-minus-one.js
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function div(g) {
+ return (g/-1) ^ 1
+}
+
+var kMinInt = 1 << 31;
+var expected_MinInt = div(kMinInt);
+var expected_minus_zero = div(0);
+%OptimizeFunctionOnNextCall(div);
+assertEquals(expected_MinInt, div(kMinInt));
+assertOptimized(div);
+assertEquals(expected_minus_zero , div(0));
+assertOptimized(div);
+
+function mul(g) {
+ return (g * -1) ^ 1
+}
+
+expected_MinInt = mul(kMinInt);
+expected_minus_zero = mul(0);
+%OptimizeFunctionOnNextCall(mul);
+assertEquals(expected_MinInt, mul(kMinInt));
+assertOptimized(mul);
+assertEquals(expected_minus_zero , mul(0));
+assertOptimized(mul);
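
Both functions guard the same two int32 hazards, observable in any engine: negating the most negative int32 overflows the type, and multiplying zero by -1 yields -0, which no int32 can represent.

    var kMinInt = 1 << 31;      // -2147483648
    console.log(-kMinInt);      // 2147483648: outside int32 range
    console.log(1 / (0 * -1));  // -Infinity: 0 * -1 is -0, not +0
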
diff --git a/deps/v8/test/mjsunit/fast-prototype.js b/deps/v8/test/mjsunit/fast-prototype.js
index 83bcffe44f..d700c3c3cc 100644
--- a/deps/v8/test/mjsunit/fast-prototype.js
+++ b/deps/v8/test/mjsunit/fast-prototype.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --expose-gc
// TODO(mstarzinger): This test does not succeed when GCs happen in
// between prototype transitions, we disable GC stress for now.
@@ -84,6 +84,8 @@ function test(use_new, add_first, set__proto__, same_map_as) {
return proto;
}
+// TODO(mstarzinger): This test fails easily if gc happens at the wrong time.
+gc();
for (var i = 0; i < 4; i++) {
var set__proto__ = ((i & 1) != 0);
diff --git a/deps/v8/test/mjsunit/harmony/math-sign.js b/deps/v8/test/mjsunit/harmony/math-sign.js
new file mode 100644
index 0000000000..8a89d62828
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/math-sign.js
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-maths
+
+assertEquals("Infinity", String(1/Math.sign(0)));
+assertEquals("-Infinity", String(1/Math.sign(-0)));
+assertEquals(1, Math.sign(100));
+assertEquals(-1, Math.sign(-199));
+assertEquals(1, Math.sign(100.1));
+assertTrue(isNaN(Math.sign("abc")));
+assertTrue(isNaN(Math.sign({})));
+assertEquals(0, Math.sign([]));
+assertEquals(1, Math.sign([1]));
+assertEquals(-1, Math.sign([-100.1]));
+assertTrue(isNaN(Math.sign([1, 1])));
+assertEquals(1, Math.sign({ toString: function() { return "100"; } }));
+assertEquals(1, Math.sign({ toString: function() { return 100; } }));
+assertEquals(-1, Math.sign({ valueOf: function() { return -1.1; } }));
+assertEquals(-1, Math.sign({ valueOf: function() { return "-1.1"; } }));
+assertEquals(-1, Math.sign(-Infinity));
+assertEquals(1, Math.sign(Infinity));
+assertEquals(-1, Math.sign("-Infinity"));
+assertEquals(1, Math.sign("Infinity"));
diff --git a/deps/v8/test/mjsunit/harmony/math-trunc.js b/deps/v8/test/mjsunit/harmony/math-trunc.js
new file mode 100644
index 0000000000..ed91ed1380
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/math-trunc.js
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-maths
+
+assertEquals("Infinity", String(1/Math.trunc(0)));
+assertEquals("-Infinity", String(1/Math.trunc(-0)));
+assertEquals("Infinity", String(1/Math.trunc(Math.PI/4)));
+assertEquals("-Infinity", String(1/Math.trunc(-Math.sqrt(2)/2)));
+assertEquals(100, Math.trunc(100));
+assertEquals(-199, Math.trunc(-199));
+assertEquals(100, Math.trunc(100.1));
+assertTrue(isNaN(Math.trunc("abc")));
+assertTrue(isNaN(Math.trunc({})));
+assertEquals(0, Math.trunc([]));
+assertEquals(1, Math.trunc([1]));
+assertEquals(-100, Math.trunc([-100.1]));
+assertTrue(isNaN(Math.trunc([1, 1])));
+assertEquals(-100, Math.trunc({ toString: function() { return "-100.3"; } }));
+assertEquals(10, Math.trunc({ toString: function() { return 10.1; } }));
+assertEquals(-1, Math.trunc({ valueOf: function() { return -1.1; } }));
+assertEquals("-Infinity",
+ String(1/Math.trunc({ valueOf: function() { return "-0.1"; } })));
+assertEquals("-Infinity", String(Math.trunc(-Infinity)));
+assertEquals("Infinity", String(Math.trunc(Infinity)));
+assertEquals("-Infinity", String(Math.trunc("-Infinity")));
+assertEquals("Infinity", String(Math.trunc("Infinity")));
diff --git a/deps/v8/test/mjsunit/harmony/object-observe.js b/deps/v8/test/mjsunit/harmony/object-observe.js
index 75f0ff8bb8..f94ab75e9a 100644
--- a/deps/v8/test/mjsunit/harmony/object-observe.js
+++ b/deps/v8/test/mjsunit/harmony/object-observe.js
@@ -110,14 +110,16 @@ Object.defineProperty(changeRecordWithAccessor, 'name', {
// Object.observe
-assertThrows(function() { Object.observe("non-object", observer.callback); }, TypeError);
+assertThrows(function() { Object.observe("non-object", observer.callback); },
+ TypeError);
assertThrows(function() { Object.observe(obj, nonFunction); }, TypeError);
assertThrows(function() { Object.observe(obj, frozenFunction); }, TypeError);
-assertThrows(function() { Object.observe(obj, function() {}, 1); }, TypeError);
-assertThrows(function() { Object.observe(obj, function() {}, [undefined]); }, TypeError);
-assertThrows(function() { Object.observe(obj, function() {}, [1]); }, TypeError);
-assertThrows(function() { Object.observe(obj, function() {}, ['foo', null]); }, TypeError);
-assertEquals(obj, Object.observe(obj, observer.callback, ['foo', 'bar', 'baz']));
+assertEquals(obj, Object.observe(obj, observer.callback, [1]));
+assertEquals(obj, Object.observe(obj, observer.callback, [true]));
+assertEquals(obj, Object.observe(obj, observer.callback, ['foo', null]));
+assertEquals(obj, Object.observe(obj, observer.callback, [undefined]));
+assertEquals(obj, Object.observe(obj, observer.callback,
+ ['foo', 'bar', 'baz']));
assertEquals(obj, Object.observe(obj, observer.callback, []));
assertEquals(obj, Object.observe(obj, observer.callback, undefined));
assertEquals(obj, Object.observe(obj, observer.callback));
@@ -202,6 +204,25 @@ observer.assertCallbackRecords([
{ object: obj, name: 'bar', type: 'deleted', expando2: 'str' }
]);
+// Non-string accept values are coerced to strings
+reset();
+Object.observe(obj, observer.callback, [true, 1, null, undefined]);
+notifier = Object.getNotifier(obj);
+notifier.notify({ type: 'true' });
+notifier.notify({ type: 'false' });
+notifier.notify({ type: '1' });
+notifier.notify({ type: '-1' });
+notifier.notify({ type: 'null' });
+notifier.notify({ type: 'nill' });
+notifier.notify({ type: 'undefined' });
+notifier.notify({ type: 'defined' });
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: obj, type: 'true' },
+ { object: obj, type: '1' },
+ { object: obj, type: 'null' },
+ { object: obj, type: 'undefined' }
+]);
// No delivery takes place if no records are pending
reset();
@@ -265,6 +286,20 @@ observer.assertCallbackRecords([
{ object: obj, type: 'new', name: 'id' },
]);
+// The empty-string property is observable
+reset();
+var obj = {};
+Object.observe(obj, observer.callback);
+obj[''] = '';
+obj[''] = ' ';
+delete obj[''];
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: obj, type: 'new', name: '' },
+ { object: obj, type: 'updated', name: '', oldValue: '' },
+ { object: obj, type: 'deleted', name: '', oldValue: ' ' },
+]);
+
// Observing a continuous stream of changes, while intermittently unobserving.
reset();
Object.observe(obj, observer.callback);
@@ -307,7 +342,7 @@ observer.assertCallbackRecords([
// Accept
reset();
-Object.observe(obj, observer.callback, []);
+Object.observe(obj, observer.callback, ['somethingElse']);
Object.getNotifier(obj).notify({
type: 'new'
});
@@ -1233,6 +1268,75 @@ observer.assertCallbackRecords([
{ object: array, name: '0', type: 'updated', oldValue: 2 },
]);
+// Splice emitted after Array mutation methods
+function MockArray(initial, observer) {
+ for (var i = 0; i < initial.length; i++)
+ this[i] = initial[i];
+
+ this.length_ = initial.length;
+ this.observer = observer;
+}
+MockArray.prototype = {
+ set length(length) {
+ Object.getNotifier(this).notify({ type: 'lengthChange' });
+ this.length_ = length;
+ Object.observe(this, this.observer.callback, ['splice']);
+ },
+ get length() {
+ return this.length_;
+ }
+}
+
+reset();
+var array = new MockArray([], observer);
+Object.observe(array, observer.callback, ['lengthChange']);
+Array.prototype.push.call(array, 1);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: array, type: 'lengthChange' },
+ { object: array, type: 'splice', index: 0, removed: [], addedCount: 1 },
+]);
+
+reset();
+var array = new MockArray([1], observer);
+Object.observe(array, observer.callback, ['lengthChange']);
+Array.prototype.pop.call(array);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: array, type: 'lengthChange' },
+ { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 },
+]);
+
+reset();
+var array = new MockArray([1], observer);
+Object.observe(array, observer.callback, ['lengthChange']);
+Array.prototype.shift.call(array);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: array, type: 'lengthChange' },
+ { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 },
+]);
+
+reset();
+var array = new MockArray([], observer);
+Object.observe(array, observer.callback, ['lengthChange']);
+Array.prototype.unshift.call(array, 1);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: array, type: 'lengthChange' },
+ { object: array, type: 'splice', index: 0, removed: [], addedCount: 1 },
+]);
+
+reset();
+var array = new MockArray([0, 1, 2], observer);
+Object.observe(array, observer.callback, ['lengthChange']);
+Array.prototype.splice.call(array, 1, 1);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: array, type: 'lengthChange' },
+ { object: array, type: 'splice', index: 1, removed: [1], addedCount: 0 },
+]);
+
//
// === PLAIN OBJECTS ===
//
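
Editor's note: the revised expectations treat the accept list as ToString-coerced rather than type-checked, which is why Object.observe(obj, cb, [1]) now succeeds, and why the notify block above delivers only records whose type string exactly matches a coerced entry ('true' but not 'false', 'null' but not 'nill'). A rough sketch of that filter, assuming a simplified delivery model:

function acceptsRecord(acceptList, record) {
  // Each accept entry is coerced with String() before comparison.
  return acceptList.map(String).indexOf(record.type) !== -1;
}
// acceptsRecord([true, 1, null, undefined], { type: 'null' })  -> true
// acceptsRecord([true, 1, null, undefined], { type: 'nill' })  -> false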
diff --git a/deps/v8/test/mjsunit/harmony/typedarrays.js b/deps/v8/test/mjsunit/harmony/typedarrays.js
index c6d130fc0c..e20fbade9b 100644
--- a/deps/v8/test/mjsunit/harmony/typedarrays.js
+++ b/deps/v8/test/mjsunit/harmony/typedarrays.js
@@ -123,6 +123,7 @@ function TestTypedArray(constr, elementSize, typicalElement) {
var ab = new ArrayBuffer(256*elementSize);
var a0 = new constr(30);
+ assertTrue(ArrayBuffer.isView(a0));
assertSame(elementSize, a0.BYTES_PER_ELEMENT);
assertSame(30, a0.length);
assertSame(30*elementSize, a0.byteLength);
@@ -476,6 +477,7 @@ function TestDataViewConstructor() {
var ab = new ArrayBuffer(256);
var d1 = new DataView(ab, 1, 255);
+ assertTrue(ArrayBuffer.isView(d1));
assertSame(ab, d1.buffer);
assertSame(1, d1.byteOffset);
assertSame(255, d1.byteLength);
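
Editor's note: the two new assertions exercise the harmony ArrayBuffer.isView predicate, which reports whether a value is a view over a buffer (any typed array or DataView), not a buffer itself. For example:

var buf = new ArrayBuffer(8);
ArrayBuffer.isView(new Uint8Array(buf));  // true: typed arrays are views
ArrayBuffer.isView(new DataView(buf));    // true: so are DataViews
ArrayBuffer.isView(buf);                  // false: the buffer itself is not a view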
diff --git a/deps/v8/test/mjsunit/lithium/DivI.js b/deps/v8/test/mjsunit/lithium/DivI.js
new file mode 100644
index 0000000000..5420d8c8d0
--- /dev/null
+++ b/deps/v8/test/mjsunit/lithium/DivI.js
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --no-use-osr
+
+function foo(a, b) {
+ var result = a / 35;
+ result += 50 / b;
+ result += a / b;
+ result += a / -1;
+ result += a / 1;
+ result += a / 4;
+ result += a / -4;
+ return result / b;
+}
+
+foo(700, 5);
+var r1 = foo(700, 5);
+%OptimizeFunctionOnNextCall(foo);
+var r2 = foo(700, 5);
+
+assertEquals(r1, r2);
+
+function boo(value) {
+ return value / -1;
+}
+
+// Test deoptimization of MinInt / -1.
+assertEquals(2147483600, boo(-2147483600));
+assertEquals(2147483600, boo(-2147483600));
+%OptimizeFunctionOnNextCall(boo);
+assertEquals(2147483600, boo(-2147483600));
+assertEquals(2147483648, boo(-2147483648));
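
Editor's note: the final assertion is the point of boo(). In 32-bit two's complement the quotient kMinInt / -1 is not representable, so optimized integer division must deoptimize and return the exact JS number rather than wrapping. A small illustration:

var kMinInt = -2147483648;
var asNumber = kMinInt / -1;        // 2147483648: the result the test demands
var wrapped  = (kMinInt / -1) | 0;  // -2147483648: what a naive int32 divide would wrap to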
diff --git a/deps/v8/test/intl/date-format/utils.js b/deps/v8/test/mjsunit/lithium/MathExp.js
index 535de15e9a..854ff5fd7f 100644
--- a/deps/v8/test/intl/date-format/utils.js
+++ b/deps/v8/test/mjsunit/lithium/MathExp.js
@@ -25,12 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Utility methods for date testing.
+// Flags: --allow-natives-syntax
-/**
- * Returns date with timezone info forced into PDT.
- */
-function usePDT(dateString) {
- var removedTZ = dateString.replace(/(\+|-)\d{4}/, '-0007');
- return removedTZ.replace(/\(.*?\)/, '(PDT)');
+function foo(x) {
+ return Math.exp(x);
}
+
+foo(12.3);
+var r1 = foo(12.3);
+%OptimizeFunctionOnNextCall(foo);
+var r2 = foo(12.3);
+
+assertEquals(r1, r2);
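
Editor's note: this and the neighbouring lithium tests share one harness shape: run the function unoptimized to collect type feedback, force optimization with the %OptimizeFunctionOnNextCall intrinsic (available under --allow-natives-syntax), then check that the optimized result matches the unoptimized one. A generic sketch of the pattern using mjsunit's assertEquals:

function assertStableUnderOptimization(fn, arg) {
  fn(arg);                          // warm up and collect type feedback
  var before = fn(arg);             // unoptimized result
  %OptimizeFunctionOnNextCall(fn);  // requires --allow-natives-syntax
  var after = fn(arg);              // optimized result
  assertEquals(before, after);
}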
diff --git a/deps/v8/test/mjsunit/lithium/SeqStringSetChar.js b/deps/v8/test/mjsunit/lithium/SeqStringSetChar.js
new file mode 100644
index 0000000000..3c890a8489
--- /dev/null
+++ b/deps/v8/test/mjsunit/lithium/SeqStringSetChar.js
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function MyStringFromCharCode(code, i) {
+ var one_byte = %NewString(3, true);
+ %_OneByteSeqStringSetChar(one_byte, 0, code);
+ %_OneByteSeqStringSetChar(one_byte, 1, code);
+ %_OneByteSeqStringSetChar(one_byte, i, code);
+ var two_byte = %NewString(3, false);
+ %_TwoByteSeqStringSetChar(two_byte, 0, code);
+ %_TwoByteSeqStringSetChar(two_byte, 1, code);
+ %_TwoByteSeqStringSetChar(two_byte, i, code);
+ return one_byte + two_byte;
+}
+
+MyStringFromCharCode(65, 2);
+var r1 = MyStringFromCharCode(65, 2);
+%OptimizeFunctionOnNextCall(MyStringFromCharCode);
+var r2 = MyStringFromCharCode(65, 2);
+assertEquals(r1, r2);
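
Editor's note: %NewString(n, one_byte) allocates an uninitialized sequential string, and the %_OneByteSeqStringSetChar / %_TwoByteSeqStringSetChar intrinsics fill it in place; each half is written at the constant indices 0 and 1 and then at the variable index i, so both addressing modes reach Lithium. At the user level the call is observably equivalent to plain concatenation:

// Observable equivalent of MyStringFromCharCode(65, 2), without intrinsics:
var ch = String.fromCharCode(65);            // "A"
var expected = ch + ch + ch + ch + ch + ch;  // "AAAAAA": one-byte half + two-byte half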
diff --git a/deps/v8/test/mjsunit/lithium/StoreKeyed.js b/deps/v8/test/mjsunit/lithium/StoreKeyed.js
new file mode 100644
index 0000000000..d34f390d25
--- /dev/null
+++ b/deps/v8/test/mjsunit/lithium/StoreKeyed.js
@@ -0,0 +1,61 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --no-use-osr
+
+function foo(a, i, v) {
+ a[0] = v;
+ a[i] = v;
+}
+
+function foo_int(a, i, v) {
+ a[0] = v;
+ a[i] = v;
+}
+
+var A1 = [1.2, 2.3];
+var A2 = [1.2, 2.3];
+var A3 = [1.2, 2.3];
+
+var A1_int = [12, 23];
+var A2_int = [12, 23];
+var A3_int = [12, 23];
+
+foo(A1, 1, 3.4);
+foo(A2, 1, 3.4);
+%OptimizeFunctionOnNextCall(foo);
+foo(A3, 1, 3.4);
+
+foo_int(A1_int, 1, 34);
+foo_int(A2_int, 1, 34);
+%OptimizeFunctionOnNextCall(foo_int);
+foo_int(A3_int, 1, 34);
+
+assertEquals(A1[0], A3[0]);
+assertEquals(A1[1], A3[1]);
+assertEquals(A1_int[0], A3_int[0]);
+assertEquals(A1_int[1], A3_int[1]);
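
Editor's note: foo and foo_int are textually identical but deliberately separate, so each collects type feedback for a single elements kind (double vs. smi arrays) and the keyed store can be specialized when optimized; one shared function would see both kinds and go polymorphic. For instance:

// A shared store function would accumulate feedback for two elements kinds:
function fooShared(a, i, v) { a[0] = v; a[i] = v; }
fooShared([1.5, 2.5], 1, 3.4);  // double-elements receiver
fooShared([1, 2], 1, 3);        // smi-elements receiver: feedback now polymorphic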
diff --git a/deps/v8/test/mjsunit/lithium/StoreKeyedExternal.js b/deps/v8/test/mjsunit/lithium/StoreKeyedExternal.js
new file mode 100644
index 0000000000..a5670fee95
--- /dev/null
+++ b/deps/v8/test/mjsunit/lithium/StoreKeyedExternal.js
@@ -0,0 +1,109 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --no-use-osr
+
+function foo_pixel(a, i, v) {
+ a[0] = v;
+ a[i] = v;
+}
+
+function foo_uint16(a, i, v) {
+ a[0] = v;
+ a[i] = v;
+}
+
+function foo_uint32(a, i, v) {
+ a[0] = v;
+ a[i] = v;
+}
+
+function foo_float(a, i, v) {
+ a[0] = v;
+ a[i] = v;
+}
+
+function foo_double(a, i, v) {
+ a[0] = v;
+ a[i] = v;
+}
+
+var A1_pixel = new Uint8ClampedArray(2);
+var A2_pixel = new Uint8ClampedArray(2);
+var A3_pixel = new Uint8ClampedArray(2);
+
+var A1_uint16 = new Uint16Array(2);
+var A2_uint16 = new Uint16Array(2);
+var A3_uint16 = new Uint16Array(2);
+
+var A1_uint32 = new Uint32Array(2);
+var A2_uint32 = new Uint32Array(2);
+var A3_uint32 = new Uint32Array(2);
+
+var A1_float = new Float32Array(2);
+var A2_float = new Float32Array(2);
+var A3_float = new Float32Array(2);
+
+var A1_double = new Float64Array(2);
+var A2_double = new Float64Array(2);
+var A3_double = new Float64Array(2);
+
+foo_pixel(A1_pixel, 1, 34);
+foo_pixel(A2_pixel, 1, 34);
+%OptimizeFunctionOnNextCall(foo_pixel);
+foo_pixel(A3_pixel, 1, 34);
+
+foo_uint16(A1_uint16, 1, 3.4);
+foo_uint16(A2_uint16, 1, 3.4);
+%OptimizeFunctionOnNextCall(foo_uint16);
+foo_uint16(A3_uint16, 1, 3.4);
+
+foo_uint32(A1_uint32, 1, 3.4);
+foo_uint32(A2_uint32, 1, 3.4);
+%OptimizeFunctionOnNextCall(foo_uint32);
+foo_uint32(A3_uint32, 1, 3.4);
+
+foo_float(A1_float, 1, 3.4);
+foo_float(A2_float, 1, 3.4);
+%OptimizeFunctionOnNextCall(foo_float);
+foo_float(A3_float, 1, 3.4);
+
+foo_double(A1_double, 1, 3.4);
+foo_double(A2_double, 1, 3.4);
+%OptimizeFunctionOnNextCall(foo_double);
+foo_double(A3_double, 1, 3.4);
+
+assertEquals(A1_pixel[0], A3_pixel[0]);
+assertEquals(A1_pixel[1], A3_pixel[1]);
+assertEquals(A1_uint16[0], A3_uint16[0]);
+assertEquals(A1_uint16[1], A3_uint16[1]);
+assertEquals(A1_uint32[0], A3_uint32[0]);
+assertEquals(A1_uint32[1], A3_uint32[1]);
+assertEquals(A1_float[0], A3_float[0]);
+assertEquals(A1_float[1], A3_float[1]);
+assertEquals(A1_double[0], A3_double[0]);
+assertEquals(A1_double[1], A3_double[1]);
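
Editor's note: storing 3.4 (and 34 for the clamped array) exercises each external array's conversion rule on the optimized store path: integer-typed arrays truncate toward zero, Uint8ClampedArray clamps and rounds to nearest, and the float arrays keep the fraction at their own precision. For example:

var u16 = new Uint16Array(1);
u16[0] = 3.4;  // reads back as 3: truncated toward zero
var pix = new Uint8ClampedArray(1);
pix[0] = 3.4;  // reads back as 3: clamped to [0, 255] and rounded to nearest
var f32 = new Float32Array(1);
f32[0] = 3.4;  // reads back as 3.4000000953674316: the nearest float32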
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index ee35af5a61..256bd3ecd4 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -25,184 +25,204 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-prefix mjsunit
-
-# All tests in the bug directory are expected to fail.
-bugs/*: FAIL
-
-##############################################################################
-# Fails.
-regress/regress-1119: FAIL
-
-# Issue 1719: Slow to collect arrays over several contexts.
-regress/regress-524: SKIP
-# When that bug is fixed, revert the expectation to:
-# Skip long running test in debug and allow it to timeout in release mode.
-# regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
-
-# This test non-deterministically runs out of memory on Windows ia32.
-regress/regress-crbug-160010: SKIP
-
-##############################################################################
-# Too slow in debug mode with --stress-opt mode.
-compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug
-compiler/regress-funcaller: PASS, SKIP if $mode == debug
-regress/regress-2318: PASS, SKIP if $mode == debug
-regress/regress-create-exception: PASS, SKIP if $mode == debug
-regress/regress-2612: PASS, SKIP if $mode == debug
-
-##############################################################################
-# Too slow in debug mode for GC stress mode.
-regress/regress-crbug-217858: PASS, SKIP if $mode == debug
+[
+[ALWAYS, {
+ # All tests in the bug directory are expected to fail.
+ 'bugs/*': [FAIL],
+
+ # TODO(mvstanton) Re-enable when the performance is bearable again.
+ 'regress/regress-2185-2': [SKIP],
+
+ ##############################################################################
+ # Flaky tests.
+ # BUG(v8:2921): Flaky on ia32 nosnap, arm and nacl.
+ 'debug-step-4-in-frame': [PASS, [('system == linux and arch == ia32 or '
+ 'arch == arm or arch == nacl_ia32 or '
+ 'arch == nacl_x64'), FLAKY]],
+
+ ##############################################################################
+ # Fails.
+ 'regress/regress-1119': [FAIL],
+
+ # Issue 1719: Slow to collect arrays over several contexts.
+ 'regress/regress-524': [SKIP],
+ # When that bug is fixed, revert the expectation to:
+ # Skip long running test in debug and allow it to timeout in release mode.
+ # regress/regress-524: [PASS, TIMEOUT, ['mode == debug', SKIP]],
+
+ # This test non-deterministically runs out of memory on Windows ia32.
+ 'regress/regress-crbug-160010': [SKIP],
+
+ ##############################################################################
+ # Too slow in debug mode with --stress-opt mode.
+ 'compiler/regress-stacktrace-methods': [PASS, ['mode == debug', SKIP]],
+ 'compiler/regress-funcaller': [PASS, ['mode == debug', SKIP]],
+ 'regress/regress-2318': [PASS, ['mode == debug', SKIP]],
+ 'regress/regress-create-exception': [PASS, ['mode == debug', SKIP]],
+
+ ##############################################################################
+ # Too slow in debug mode for GC stress mode.
+ 'regress/regress-crbug-217858': [PASS, ['mode == debug', SKIP]],
+
+ ##############################################################################
+ # Only regexp stuff tested, no need for extensive Crankshaft tests.
+ 'regexp-global': [PASS, NO_VARIANTS],
+
+ ##############################################################################
+ # No need to waste time for this test.
+ 'd8-performance-now': [PASS, NO_VARIANTS],
+
+ ##############################################################################
+ # These use a built-in that's only present in debug mode. They take
+ # too long to run in debug mode on ARM and MIPS.
+ 'fuzz-natives-part*': [PASS, ['mode == release or arch == arm or arch == android_arm or arch == mipsel', SKIP]],
+
+ 'big-object-literal': [PASS, ['arch == arm or arch == android_arm', SKIP]],
+
+ # Issue 488: this test sometimes times out.
+ 'array-constructor': [PASS, TIMEOUT],
+
+ # Very slow on ARM and MIPS, contains no architecture dependent code.
+ 'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == mipsel', TIMEOUT]],
+
+ ##############################################################################
+ # This test expects to reach a certain recursion depth, which may not work
+ # for debug mode.
+ 'json-recursive': [PASS, ['mode == debug', PASS, FAIL]],
+
+ ##############################################################################
+ # Skip long running tests that time out in debug mode.
+ 'generated-transition-stub': [PASS, ['mode == debug', SKIP]],
+
+ ##############################################################################
+ # This test sets the umask on a per-process basis and hence cannot be
+ # used in multi-threaded runs.
+ # On android there is no /tmp directory.
+ 'd8-os': [PASS, ['isolates or arch == android_arm or arch == android_ia32', SKIP]],
+ 'tools/tickprocessor': [PASS, ['arch == android_arm or arch == android_ia32', SKIP]],
+
+ ##############################################################################
+ # Long running test that reproduces memory leak and should be run manually.
+ 'regress/regress-2073': [SKIP],
+}], # ALWAYS
##############################################################################
-# These use a built-in that's only present in debug mode. They take
-# too long to run in debug mode on ARM and MIPS.
-fuzz-natives-part*: PASS, SKIP if ($mode == release || $arch == arm || $arch == android_arm || $arch == mipsel)
-
-big-object-literal: PASS, SKIP if ($arch == arm || $arch == android_arm)
-
-# Issue 488: this test sometimes times out.
-array-constructor: PASS || TIMEOUT
-
-# Very slow on ARM and MIPS, contains no architecture dependent code.
-unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == android_arm || $arch == mipsel)
+['arch == arm or arch == android_arm', {
+
+ # Slow tests which time out in debug mode.
+ 'try': [PASS, ['mode == debug', SKIP]],
+ 'debug-scripts-request': [PASS, ['mode == debug', SKIP]],
+ 'array-constructor': [PASS, ['mode == debug', SKIP]],
+ 'regress/regress-1122': [PASS, ['mode == debug and arch == android_arm', SKIP]],
+
+ # Flaky test that can hit compilation-time stack overflow in debug mode.
+ 'unicode-test': [PASS, ['mode == debug', PASS, FAIL]],
+
+ # Times out often in release mode on ARM.
+ 'compiler/regress-stacktrace-methods': [PASS, PASS, ['mode == release', TIMEOUT]],
+ 'array-splice': [PASS, TIMEOUT],
+
+ # Long running test.
+ 'string-indexof-2': [PASS, TIMEOUT],
+ 'mirror-object': [PASS, TIMEOUT],
+
+ # BUG(3251035): Timeouts in long looping crankshaft optimization
+ # tests. Skipping because having them time out takes too long on the
+ # buildbot.
+ 'compiler/alloc-number': [SKIP],
+ 'compiler/array-length': [SKIP],
+ 'compiler/assignment-deopt': [SKIP],
+ 'compiler/deopt-args': [SKIP],
+ 'compiler/inline-compare': [SKIP],
+ 'compiler/inline-global-access': [SKIP],
+ 'compiler/optimized-function-calls': [SKIP],
+ 'compiler/pic': [SKIP],
+ 'compiler/property-calls': [SKIP],
+ 'compiler/recursive-deopt': [SKIP],
+ 'compiler/regress-4': [SKIP],
+ 'compiler/regress-funcaller': [SKIP],
+ 'compiler/regress-rep-change': [SKIP],
+ 'compiler/regress-arguments': [SKIP],
+ 'compiler/regress-funarguments': [SKIP],
+ 'compiler/regress-3249650': [SKIP],
+ 'compiler/simple-deopt': [SKIP],
+ 'regress/regress-490': [SKIP],
+ 'regress/regress-634': [SKIP],
+ 'regress/regress-create-exception': [SKIP],
+ 'regress/regress-3218915': [SKIP],
+ 'regress/regress-3247124': [SKIP],
+
+ # Requires a bigger stack size in the Genesis and, if the stack size is
+ # increased, the test requires too much time to run. However, the problem
+ # the test covers should be platform-independent.
+ 'regress/regress-1132': [SKIP],
+
+ # Stack manipulations in LiveEdit are not implemented for this arch.
+ 'debug-liveedit-check-stack': [SKIP],
+ 'debug-liveedit-stack-padding': [SKIP],
+ 'debug-liveedit-restart-frame': [SKIP],
+ 'debug-liveedit-double-call': [SKIP],
+
+ # Currently always deopt on minus zero
+ 'math-floor-of-div-minus-zero': [SKIP],
+}], # 'arch == arm or arch == android_arm'
##############################################################################
-# This test expects to reach a certain recursion depth, which may not work
-# for debug mode.
-json-recursive: PASS, (PASS || FAIL) if $mode == debug
-
-##############################################################################
-# Skip long running tests that time out in debug mode.
-generated-transition-stub: PASS, SKIP if $mode == debug
-
-##############################################################################
-# This test sets the umask on a per-process basis and hence cannot be
-# used in multi-threaded runs.
-# On android there is no /tmp directory.
-d8-os: PASS, SKIP if ($isolates || $arch == android_arm || $arch == android_ia32)
-tools/tickprocessor: PASS, SKIP if ($arch == android_arm || $arch == android_ia32)
-
-##############################################################################
-# Long running test that reproduces memory leak and should be run manually.
-regress/regress-2073: SKIP
-
-##############################################################################
-[ $arch == arm || $arch == android_arm ]
-
-# Slow tests which times out in debug mode.
-try: PASS, SKIP if $mode == debug
-debug-scripts-request: PASS, SKIP if $mode == debug
-array-constructor: PASS, SKIP if $mode == debug
-regress/regress-1122: PASS, SKIP if ($mode == debug && $arch == android_arm)
-
-# Flaky test that can hit compilation-time stack overflow in debug mode.
-unicode-test: PASS, (PASS || FAIL) if $mode == debug
-
-# Times out often in release mode on ARM.
-compiler/regress-stacktrace-methods: PASS, PASS || TIMEOUT if $mode == release
-array-splice: PASS || TIMEOUT
-
-# Long running test.
-string-indexof-2: PASS || TIMEOUT
-mirror-object: PASS || TIMEOUT
-
-# BUG(3251035): Timeouts in long looping crankshaft optimization
-# tests. Skipping because having them timeout takes too long on the
-# buildbot.
-compiler/alloc-number: SKIP
-compiler/array-length: SKIP
-compiler/assignment-deopt: SKIP
-compiler/deopt-args: SKIP
-compiler/inline-compare: SKIP
-compiler/inline-global-access: SKIP
-compiler/optimized-function-calls: SKIP
-compiler/pic: SKIP
-compiler/property-calls: SKIP
-compiler/recursive-deopt: SKIP
-compiler/regress-4: SKIP
-compiler/regress-funcaller: SKIP
-compiler/regress-rep-change: SKIP
-compiler/regress-arguments: SKIP
-compiler/regress-funarguments: SKIP
-compiler/regress-3249650: SKIP
-compiler/simple-deopt: SKIP
-regress/regress-490: SKIP
-regress/regress-634: SKIP
-regress/regress-create-exception: SKIP
-regress/regress-3218915: SKIP
-regress/regress-3247124: SKIP
-
-# Requires bigger stack size in the Genesis and if stack size is increased,
-# the test requires too much time to run. However, the problem test covers
-# should be platform-independent.
-regress/regress-1132: SKIP
-
-# Stack manipulations in LiveEdit is not implemented for this arch.
-debug-liveedit-check-stack: SKIP
-debug-liveedit-stack-padding: SKIP
-debug-liveedit-restart-frame: SKIP
-debug-liveedit-double-call: SKIP
-
-# Currently always deopt on minus zero
-math-floor-of-div-minus-zero: SKIP
-
-##############################################################################
-[ $arch == mipsel ]
-
-# Slow tests which times out in debug mode.
-try: PASS, SKIP if $mode == debug
-debug-scripts-request: PASS, SKIP if $mode == debug
-array-constructor: PASS, SKIP if $mode == debug
-
-# Times out often in release mode on MIPS.
-compiler/regress-stacktrace-methods: PASS, PASS || TIMEOUT if $mode == release
-array-splice: PASS || TIMEOUT
-
-# Long running test.
-mirror-object: PASS || TIMEOUT
-string-indexof-2: PASS || TIMEOUT
-
-# BUG(3251035): Timeouts in long looping crankshaft optimization
-# tests. Skipping because having them timeout takes too long on the
-# buildbot.
-compiler/alloc-number: SKIP
-compiler/array-length: SKIP
-compiler/assignment-deopt: SKIP
-compiler/deopt-args: SKIP
-compiler/inline-compare: SKIP
-compiler/inline-global-access: SKIP
-compiler/optimized-function-calls: SKIP
-compiler/pic: SKIP
-compiler/property-calls: SKIP
-compiler/recursive-deopt: SKIP
-compiler/regress-4: SKIP
-compiler/regress-funcaller: SKIP
-compiler/regress-rep-change: SKIP
-compiler/regress-arguments: SKIP
-compiler/regress-funarguments: SKIP
-compiler/regress-3249650: SKIP
-compiler/simple-deopt: SKIP
-regress/regress-490: SKIP
-regress/regress-634: SKIP
-regress/regress-create-exception: SKIP
-regress/regress-3218915: SKIP
-regress/regress-3247124: SKIP
-
-# Requires bigger stack size in the Genesis and if stack size is increased,
-# the test requires too much time to run. However, the problem test covers
-# should be platform-independent.
-regress/regress-1132: SKIP
-
-# Stack manipulations in LiveEdit is not implemented for this arch.
-debug-liveedit-check-stack: SKIP
-debug-liveedit-stack-padding: SKIP
-debug-liveedit-restart-frame: SKIP
-debug-liveedit-double-call: SKIP
-
-# Currently always deopt on minus zero
-math-floor-of-div-minus-zero: SKIP
+['arch == mipsel', {
+
+ # Slow tests which time out in debug mode.
+ 'try': [PASS, ['mode == debug', SKIP]],
+ 'debug-scripts-request': [PASS, ['mode == debug', SKIP]],
+ 'array-constructor': [PASS, ['mode == debug', SKIP]],
+
+ # Times out often in release mode on MIPS.
+ 'compiler/regress-stacktrace-methods': [PASS, PASS, ['mode == release', TIMEOUT]],
+ 'array-splice': [PASS, TIMEOUT],
+
+ # Long running test.
+ 'mirror-object': [PASS, TIMEOUT],
+ 'string-indexof-2': [PASS, TIMEOUT],
+
+ # BUG(3251035): Timeouts in long looping crankshaft optimization
+ # tests. Skipping because having them time out takes too long on the
+ # buildbot.
+ 'compiler/alloc-number': [SKIP],
+ 'compiler/array-length': [SKIP],
+ 'compiler/assignment-deopt': [SKIP],
+ 'compiler/deopt-args': [SKIP],
+ 'compiler/inline-compare': [SKIP],
+ 'compiler/inline-global-access': [SKIP],
+ 'compiler/optimized-function-calls': [SKIP],
+ 'compiler/pic': [SKIP],
+ 'compiler/property-calls': [SKIP],
+ 'compiler/recursive-deopt': [SKIP],
+ 'compiler/regress-4': [SKIP],
+ 'compiler/regress-funcaller': [SKIP],
+ 'compiler/regress-rep-change': [SKIP],
+ 'compiler/regress-arguments': [SKIP],
+ 'compiler/regress-funarguments': [SKIP],
+ 'compiler/regress-3249650': [SKIP],
+ 'compiler/simple-deopt': [SKIP],
+ 'regress/regress-490': [SKIP],
+ 'regress/regress-634': [SKIP],
+ 'regress/regress-create-exception': [SKIP],
+ 'regress/regress-3218915': [SKIP],
+ 'regress/regress-3247124': [SKIP],
+
+ # Requires a bigger stack size in the Genesis and, if the stack size is
+ # increased, the test requires too much time to run. However, the problem
+ # the test covers should be platform-independent.
+ 'regress/regress-1132': [SKIP],
+
+ # Stack manipulations in LiveEdit are not implemented for this arch.
+ 'debug-liveedit-check-stack': [SKIP],
+ 'debug-liveedit-stack-padding': [SKIP],
+ 'debug-liveedit-restart-frame': [SKIP],
+ 'debug-liveedit-double-call': [SKIP],
+
+ # Currently always deopt on minus zero
+ 'math-floor-of-div-minus-zero': [SKIP],
+}], # 'arch == mipsel'
##############################################################################
# Native Client uses the ARM simulator so will behave similarly to arm
@@ -210,46 +230,52 @@ math-floor-of-div-minus-zero: SKIP
# TODO(bradchen): enable more tests for NaCl V8 when it stops using
# the ARM simulator.
##############################################################################
-[ $arch == nacl_ia32 || $arch == nacl_x64 ]
-# There is no /tmp directory for NaCl runs
-d8-os: SKIP
+['arch == nacl_ia32 or arch == nacl_x64', {
+ # There is no /tmp directory for NaCl runs
+ 'd8-os': [SKIP],
+
+ # Stack manipulations in LiveEdit are not implemented for this arch.
+ 'debug-liveedit-check-stack': [SKIP],
+ 'debug-liveedit-stack-padding': [SKIP],
+ 'debug-liveedit-restart-frame': [SKIP],
+ 'debug-liveedit-double-call': [SKIP],
-# Stack manipulations in LiveEdit is not implemented for this arch.
-debug-liveedit-check-stack: SKIP
-debug-liveedit-stack-padding: SKIP
-debug-liveedit-restart-frame: SKIP
-debug-liveedit-double-call: SKIP
+ # This test dumps core for arm.debug, so no reason to expect it to work
+ # for NaCl. The other three fuzz-natives tests seem to run fine.
+ # As noted above none of them are run in the arm.debug case.
+ 'fuzz-natives-part4': [SKIP],
-# This test dumps core for arm.debug, so no reason to expect it to work
-# for NaCl. The other three fuzz-natives tests seem to run fine.
-# As noted above none of them are run in the arm.debug case.
-fuzz-natives-part4: SKIP
+ # NaCl builds have problems with this test since Pepper_28.
+ # V8 Issue 2786
+ 'math-exp-precision': [SKIP],
-# NaCl builds have problems with this test since Pepper_28.
-# V8 Issue 2786
-math-exp-precision: SKIP
+ # Requires a bigger stack size in the Genesis and, if the stack size is
+ # increased, the test requires too much time to run. However, the problem
+ # the test covers should be platform-independent.
+ 'regress/regress-1132': [SKIP],
-# Requires bigger stack size in the Genesis and if stack size is increased,
-# the test requires too much time to run. However, the problem test covers
-# should be platform-independent.
-regress/regress-1132: SKIP
+ # Poor performance for NaCl V8 causes an assertion failure for this test.
+ 'regress/regress-165637': [SKIP],
-# Poor performance for NaCl V8 causes an assertion failure for this test.
-regress/regress-165637: SKIP
+ # Skip long running test that times out in debug mode and goes OOM on NaCl.
+ 'regress/regress-crbug-160010': [SKIP],
-# Skip long running test that times out in debug mode and goes OOM on NaCl.
-regress/regress-crbug-160010: SKIP
+ # Bug(v8:2978).
+ 'lithium/MathExp': [PASS, FAIL],
+}], # 'arch == nacl_ia32 or arch == nacl_x64'
##############################################################################
-[ $deopt_fuzzer == True ]
-
-# Skip tests that are not suitable for deoptimization fuzzing.
-assert-opt-and-deopt: SKIP
-never-optimize: SKIP
-regress/regress-2185-2: SKIP
-harmony/object-observe: SKIP
-readonly: SKIP
-array-feedback: SKIP
-
-# Deopt every n garbage collections collides with the deopt every n times flag.
-regress/regress-2653: SKIP
+['deopt_fuzzer == True', {
+
+ # Skip tests that are not suitable for deoptimization fuzzing.
+ 'assert-opt-and-deopt': [SKIP],
+ 'never-optimize': [SKIP],
+ 'regress/regress-2185-2': [SKIP],
+ 'harmony/object-observe': [SKIP],
+ 'readonly': [SKIP],
+ 'array-feedback': [SKIP],
+
+ # Deopt every n garbage collections collides with deopt every n times.
+ 'regress/regress-2653': [SKIP],
+}], # 'deopt_fuzzer == True'
+]
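
Editor's note: this hunk converts mjsunit.status from the old ad-hoc DSL (bare "test: OUTCOME, SKIP if $cond" lines under "[ $cond ]" headers) to the new Python-literal format: a top-level list of [condition, {rules}] sections, where each rule maps a test name to a list of outcomes, and a nested [condition, OUTCOME] pair applies only when its condition holds. A schematic entry in the new syntax (test names hypothetical):

['arch == arm', {
  # Plain outcome list: passes, but may time out.
  'some-slow-test': [PASS, TIMEOUT],
  # Nested pair: skipped only in debug mode.
  'some-debug-test': [PASS, ['mode == debug', SKIP]],
}],  # 'arch == arm'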
diff --git a/deps/v8/test/mjsunit/number-tostring-add.js b/deps/v8/test/mjsunit/number-tostring-add.js
new file mode 100644
index 0000000000..41d3cbd525
--- /dev/null
+++ b/deps/v8/test/mjsunit/number-tostring-add.js
@@ -0,0 +1,89 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function add(a, b) {
+ return a + b;
+}
+
+function testToString(a, b) {
+ assertEquals(a, b.toString());
+ assertEquals(a, "" + b);
+ assertEquals(a, add("", b));
+ assertEquals("yes" + a, add("yes", b));
+}
+
+testToString("NaN", (NaN));
+testToString("Infinity", (1/0));
+testToString("-Infinity", (-1/0));
+testToString("0", (0));
+testToString("9", (9));
+testToString("90", (90));
+testToString("90.12", (90.12));
+testToString("0.1", (0.1));
+testToString("0.01", (0.01));
+testToString("0.0123", (0.0123));
+testToString("111111111111111110000", (111111111111111111111));
+testToString("1.1111111111111111e+21", (1111111111111111111111));
+testToString("1.1111111111111111e+22", (11111111111111111111111));
+testToString("0.00001", (0.00001));
+testToString("0.000001", (0.000001));
+testToString("1e-7", (0.0000001));
+testToString("1.2e-7", (0.00000012));
+testToString("1.23e-7", (0.000000123));
+testToString("1e-8", (0.00000001));
+testToString("1.2e-8", (0.000000012));
+testToString("1.23e-8", (0.0000000123));
+
+testToString("0", (-0));
+testToString("-9", (-9));
+testToString("-90", (-90));
+testToString("-90.12", (-90.12));
+testToString("-0.1", (-0.1));
+testToString("-0.01", (-0.01));
+testToString("-0.0123", (-0.0123));
+testToString("-111111111111111110000", (-111111111111111111111));
+testToString("-1.1111111111111111e+21", (-1111111111111111111111));
+testToString("-1.1111111111111111e+22", (-11111111111111111111111));
+testToString("-0.00001", (-0.00001));
+testToString("-0.000001", (-0.000001));
+testToString("-1e-7", (-0.0000001));
+testToString("-1.2e-7", (-0.00000012));
+testToString("-1.23e-7", (-0.000000123));
+testToString("-1e-8", (-0.00000001));
+testToString("-1.2e-8", (-0.000000012));
+testToString("-1.23e-8", (-0.0000000123));
+
+testToString("1000", (1000));
+testToString("0.00001", (0.00001));
+testToString("1000000000000000100", (1000000000000000128));
+testToString("1e+21", (1000000000000000012800));
+testToString("-1e+21", (-1000000000000000012800));
+testToString("1e-7", (0.0000001));
+testToString("-1e-7", (-0.0000001));
+testToString("1.0000000000000001e+21", (1000000000000000128000));
+testToString("0.000001", (0.000001));
+testToString("1e-7", (0.0000001));
diff --git a/deps/v8/test/mjsunit/number-tostring-func.js b/deps/v8/test/mjsunit/number-tostring-func.js
new file mode 100644
index 0000000000..c64706e703
--- /dev/null
+++ b/deps/v8/test/mjsunit/number-tostring-func.js
@@ -0,0 +1,367 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ----------------------------------------------------------------------
+// toString
+function testToString(a, b) {
+ assertEquals(a, b.toString());
+}
+
+function testToStringP(a, b, c) {
+ assertEquals(a, b.toString(c));
+}
+
+testToString("NaN", (NaN));
+testToString("Infinity", (1/0));
+testToString("-Infinity", (-1/0));
+testToString("0", (0));
+testToString("9", (9));
+testToString("90", (90));
+testToString("90.12", (90.12));
+testToString("0.1", (0.1));
+testToString("0.01", (0.01));
+testToString("0.0123", (0.0123));
+testToString("111111111111111110000", (111111111111111111111));
+testToString("1.1111111111111111e+21", (1111111111111111111111));
+testToString("1.1111111111111111e+22", (11111111111111111111111));
+testToString("0.00001", (0.00001));
+testToString("0.000001", (0.000001));
+testToString("1e-7", (0.0000001));
+testToString("1.2e-7", (0.00000012));
+testToString("1.23e-7", (0.000000123));
+testToString("1e-8", (0.00000001));
+testToString("1.2e-8", (0.000000012));
+testToString("1.23e-8", (0.0000000123));
+
+testToString("0", (-0));
+testToString("-9", (-9));
+testToString("-90", (-90));
+testToString("-90.12", (-90.12));
+testToString("-0.1", (-0.1));
+testToString("-0.01", (-0.01));
+testToString("-0.0123", (-0.0123));
+testToString("-111111111111111110000", (-111111111111111111111));
+testToString("-1.1111111111111111e+21", (-1111111111111111111111));
+testToString("-1.1111111111111111e+22", (-11111111111111111111111));
+testToString("-0.00001", (-0.00001));
+testToString("-0.000001", (-0.000001));
+testToString("-1e-7", (-0.0000001));
+testToString("-1.2e-7", (-0.00000012));
+testToString("-1.23e-7", (-0.000000123));
+testToString("-1e-8", (-0.00000001));
+testToString("-1.2e-8", (-0.000000012));
+testToString("-1.23e-8", (-0.0000000123));
+
+testToString("1000", (1000));
+testToString("0.00001", (0.00001));
+testToString("1000000000000000100", (1000000000000000128));
+testToString("1e+21", (1000000000000000012800));
+testToString("-1e+21", (-1000000000000000012800));
+testToString("1e-7", (0.0000001));
+testToString("-1e-7", (-0.0000001));
+testToString("1.0000000000000001e+21", (1000000000000000128000));
+testToString("0.000001", (0.000001));
+testToString("1e-7", (0.0000001));
+
+testToStringP("NaN", (NaN), 16);
+testToStringP("Infinity", (1/0), 16);
+testToStringP("-Infinity", (-1/0), 16);
+testToStringP("0", (0), 16);
+testToStringP("9", (9), 16);
+testToStringP("5a", (90), 16);
+testToStringP("5a.1eb851eb852", (90.12), 16);
+testToStringP("0.1999999999999a", (0.1), 16);
+testToStringP("0.028f5c28f5c28f6", (0.01), 16);
+testToStringP("0.032617c1bda511a", (0.0123), 16);
+testToStringP("605f9f6dd18bc8000", (111111111111111111111), 16);
+testToStringP("3c3bc3a4a2f75c0000", (1111111111111111111111), 16);
+testToStringP("25a55a46e5da9a00000", (11111111111111111111111), 16);
+testToStringP("0.0000a7c5ac471b4788", (0.00001), 16);
+testToStringP("0.000010c6f7a0b5ed8d", (0.000001), 16);
+testToStringP("0.000001ad7f29abcaf48", (0.0000001), 16);
+testToStringP("0.000002036565348d256", (0.00000012), 16);
+testToStringP("0.0000021047ee22aa466", (0.000000123), 16);
+testToStringP("0.0000002af31dc4611874", (0.00000001), 16);
+testToStringP("0.000000338a23b87483be", (0.000000012), 16);
+testToStringP("0.00000034d3fe36aaa0a2", (0.0000000123), 16);
+
+testToStringP("0", (-0), 16);
+testToStringP("-9", (-9), 16);
+testToStringP("-5a", (-90), 16);
+testToStringP("-5a.1eb851eb852", (-90.12), 16);
+testToStringP("-0.1999999999999a", (-0.1), 16);
+testToStringP("-0.028f5c28f5c28f6", (-0.01), 16);
+testToStringP("-0.032617c1bda511a", (-0.0123), 16);
+testToStringP("-605f9f6dd18bc8000", (-111111111111111111111), 16);
+testToStringP("-3c3bc3a4a2f75c0000", (-1111111111111111111111), 16);
+testToStringP("-25a55a46e5da9a00000", (-11111111111111111111111), 16);
+testToStringP("-0.0000a7c5ac471b4788", (-0.00001), 16);
+testToStringP("-0.000010c6f7a0b5ed8d", (-0.000001), 16);
+testToStringP("-0.000001ad7f29abcaf48", (-0.0000001), 16);
+testToStringP("-0.000002036565348d256", (-0.00000012), 16);
+testToStringP("-0.0000021047ee22aa466", (-0.000000123), 16);
+testToStringP("-0.0000002af31dc4611874", (-0.00000001), 16);
+testToStringP("-0.000000338a23b87483be", (-0.000000012), 16);
+testToStringP("-0.00000034d3fe36aaa0a2", (-0.0000000123), 16);
+
+testToString("4294967296", Math.pow(2,32));
+testToStringP("ffffffff", (Math.pow(2,32)-1), 16);
+testToStringP("11111111111111111111111111111111", (Math.pow(2,32)-1), 2);
+testToStringP("5yc1z", (10000007), 36);
+testToStringP("0", (0), 36);
+testToStringP("0", (0), 16);
+testToStringP("0", (0), 10);
+testToStringP("0", (0), 8);
+testToStringP("0", (0), 2);
+testToStringP("100000000000000000000000000000000", Math.pow(2,32), 2);
+testToStringP("100000000000000000000000000000001", (Math.pow(2,32) + 1), 2);
+testToStringP("100000000000080", (0x100000000000081), 16);
+testToStringP("1000000000000100", (-(-'0x1000000000000081')), 16);
+testToStringP("1000000000000000", (-(-'0x1000000000000080')), 16);
+testToStringP("1000000000000000", (-(-'0x100000000000007F')), 16);
+testToStringP("100000000000000000000000000000000000000000000000010000000", (0x100000000000081), 2);
+testToStringP("-11111111111111111111111111111111", (-(Math.pow(2,32)-1)), 2);
+testToStringP("-5yc1z", (-10000007), 36);
+testToStringP("-100000000000000000000000000000000", (-Math.pow(2,32)), 2);
+testToStringP("-100000000000000000000000000000001", (-(Math.pow(2,32) + 1)), 2);
+testToStringP("-100000000000080", (-0x100000000000081), 16);
+testToStringP("-100000000000000000000000000000000000000000000000010000000", (-0x100000000000081), 2);
+testToStringP("8.8", (8.5), 16);
+testToStringP("-8.8", (-8.5), 16);
+
+// ----------------------------------------------------------------------
+// toFixed
+function testToFixed(a, b, c) {
+ assertEquals(a, b.toFixed(c));
+}
+
+testToFixed("NaN", (NaN), (2));
+testToFixed("Infinity", (1/0), (2));
+testToFixed("-Infinity", (-1/0), (2));
+
+testToFixed("1.1111111111111111e+21", (1111111111111111111111), (8));
+testToFixed("0.1", (0.1), (1));
+testToFixed("0.10", (0.1), (2));
+testToFixed("0.100", (0.1), (3));
+testToFixed("0.01", (0.01), (2));
+testToFixed("0.010", (0.01), (3));
+testToFixed("0.0100", (0.01), (4));
+testToFixed("0.00", (0.001), (2));
+testToFixed("0.001", (0.001), (3));
+testToFixed("0.0010", (0.001), (4));
+testToFixed("1.0000", (1), (4));
+testToFixed("1.0", (1), (1));
+testToFixed("1", (1), (0));
+testToFixed("12", (12), (0));
+testToFixed("1", (1.1), (0));
+testToFixed("12", (12.1), (0));
+testToFixed("1", (1.12), (0));
+testToFixed("12", (12.12), (0));
+testToFixed("0.0000006", (0.0000006), (7));
+testToFixed("0.00000006", (0.00000006), (8));
+testToFixed("0.000000060", (0.00000006), (9));
+testToFixed("0.0000000600", (0.00000006), (10));
+testToFixed("0", (0), (0));
+testToFixed("0.0", (0), (1));
+testToFixed("0.00", (0), (2));
+
+testToFixed("-1.1111111111111111e+21", (-1111111111111111111111), (8));
+testToFixed("-0.1", (-0.1), (1));
+testToFixed("-0.10", (-0.1), (2));
+testToFixed("-0.100", (-0.1), (3));
+testToFixed("-0.01", (-0.01), (2));
+testToFixed("-0.010", (-0.01), (3));
+testToFixed("-0.0100", (-0.01), (4));
+testToFixed("-0.00", (-0.001), (2));
+testToFixed("-0.001", (-0.001), (3));
+testToFixed("-0.0010", (-0.001), (4));
+testToFixed("-1.0000", (-1), (4));
+testToFixed("-1.0", (-1), (1));
+testToFixed("-1", (-1), (0));
+testToFixed("-1", (-1.1), (0));
+testToFixed("-12", (-12.1), (0));
+testToFixed("-1", (-1.12), (0));
+testToFixed("-12", (-12.12), (0));
+testToFixed("-0.0000006", (-0.0000006), (7));
+testToFixed("-0.00000006", (-0.00000006), (8));
+testToFixed("-0.000000060", (-0.00000006), (9));
+testToFixed("-0.0000000600", (-0.00000006), (10));
+testToFixed("0", (-0), (0));
+testToFixed("0.0", (-0), (1));
+testToFixed("0.00", (-0), (2));
+
+testToFixed("0.00001", (0.00001), (5));
+testToFixed("0.00000000000000000010", (0.0000000000000000001), (20));
+testToFixed("0.00001000000000000", (0.00001), (17));
+testToFixed("1.00000000000000000", (1), (17));
+testToFixed("100000000000000128.0", (100000000000000128), (1));
+testToFixed("10000000000000128.00", (10000000000000128), (2));
+testToFixed("10000000000000128.00000000000000000000", (10000000000000128), (20));
+testToFixed("-42.000", (-42), (3));
+testToFixed("-0.00000000000000000010", (-0.0000000000000000001), (20));
+testToFixed("0.12312312312312299889", (0.123123123123123), (20));
+
+assertEquals("-1000000000000000128", (-1000000000000000128).toFixed());
+assertEquals("0", (0).toFixed());
+assertEquals("1000000000000000128", (1000000000000000128).toFixed());
+assertEquals("1000", (1000).toFixed());
+assertEquals("0", (0.00001).toFixed());
+// Test that we round up even when the last digit generated is even.
+// dtoa does not do this in its original form.
+assertEquals("1", 0.5.toFixed(0), "0.5.toFixed(0)");
+assertEquals("-1", (-0.5).toFixed(0), "(-0.5).toFixed(0)");
+assertEquals("1.3", 1.25.toFixed(1), "1.25.toFixed(1)");
+// This is bizarre, but Spidermonkey and KJS behave the same.
+assertEquals("234.2040", (234.20405).toFixed(4), "234.2040.toFixed(4)");
+assertEquals("234.2041", (234.2040506).toFixed(4));
+
+// ----------------------------------------------------------------------
+// toExponential
+function testToExponential(a, b) {
+ assertEquals(a, b.toExponential());
+}
+
+function testToExponentialP(a, b, c) {
+ assertEquals(a, b.toExponential(c));
+}
+
+testToExponential("1e+0", (1));
+testToExponential("1.1e+1", (11));
+testToExponential("1.12e+2", (112));
+testToExponential("1e-1", (0.1));
+testToExponential("1.1e-1", (0.11));
+testToExponential("1.12e-1", (0.112));
+testToExponential("-1e+0", (-1));
+testToExponential("-1.1e+1", (-11));
+testToExponential("-1.12e+2", (-112));
+testToExponential("-1e-1", (-0.1));
+testToExponential("-1.1e-1", (-0.11));
+testToExponential("-1.12e-1", (-0.112));
+testToExponential("0e+0", (0));
+testToExponential("1.12356e-4", (0.000112356));
+testToExponential("-1.12356e-4", (-0.000112356));
+
+testToExponentialP("1e+0", (1), (0));
+testToExponentialP("1e+1", (11), (0));
+testToExponentialP("1e+2", (112), (0));
+testToExponentialP("1.0e+0", (1), (1));
+testToExponentialP("1.1e+1", (11), (1));
+testToExponentialP("1.1e+2", (112), (1));
+testToExponentialP("1.00e+0", (1), (2));
+testToExponentialP("1.10e+1", (11), (2));
+testToExponentialP("1.12e+2", (112), (2));
+testToExponentialP("1.000e+0", (1), (3));
+testToExponentialP("1.100e+1", (11), (3));
+testToExponentialP("1.120e+2", (112), (3));
+testToExponentialP("1e-1", (0.1), (0));
+testToExponentialP("1e-1", (0.11), (0));
+testToExponentialP("1e-1", (0.112), (0));
+testToExponentialP("1.0e-1", (0.1), (1));
+testToExponentialP("1.1e-1", (0.11), (1));
+testToExponentialP("1.1e-1", (0.112), (1));
+testToExponentialP("1.00e-1", (0.1), (2));
+testToExponentialP("1.10e-1", (0.11), (2));
+testToExponentialP("1.12e-1", (0.112), (2));
+testToExponentialP("1.000e-1", (0.1), (3));
+testToExponentialP("1.100e-1", (0.11), (3));
+testToExponentialP("1.120e-1", (0.112), (3));
+
+testToExponentialP("-1e+0", (-1), (0));
+testToExponentialP("-1e+1", (-11), (0));
+testToExponentialP("-1e+2", (-112), (0));
+testToExponentialP("-1.0e+0", (-1), (1));
+testToExponentialP("-1.1e+1", (-11), (1));
+testToExponentialP("-1.1e+2", (-112), (1));
+testToExponentialP("-1.00e+0", (-1), (2));
+testToExponentialP("-1.10e+1", (-11), (2));
+testToExponentialP("-1.12e+2", (-112), (2));
+testToExponentialP("-1.000e+0", (-1), (3));
+testToExponentialP("-1.100e+1", (-11), (3));
+testToExponentialP("-1.120e+2", (-112), (3));
+testToExponentialP("-1e-1", (-0.1), (0));
+testToExponentialP("-1e-1", (-0.11), (0));
+testToExponentialP("-1e-1", (-0.112), (0));
+testToExponentialP("-1.0e-1", (-0.1), (1));
+testToExponentialP("-1.1e-1", (-0.11), (1));
+testToExponentialP("-1.1e-1", (-0.112), (1));
+testToExponentialP("-1.00e-1", (-0.1), (2));
+testToExponentialP("-1.10e-1", (-0.11), (2));
+testToExponentialP("-1.12e-1", (-0.112), (2));
+testToExponentialP("-1.000e-1", (-0.1), (3));
+testToExponentialP("-1.100e-1", (-0.11), (3));
+testToExponentialP("-1.120e-1", (-0.112), (3));
+
+testToExponentialP("NaN", (NaN), (2));
+testToExponentialP("Infinity", (Infinity), (2));
+testToExponentialP("-Infinity", (-Infinity), (2));
+testToExponentialP("1e+0", (1), (0));
+testToExponentialP("0.00e+0", (0), (2));
+testToExponentialP("1e+1", (11.2356), (0));
+testToExponentialP("1.1236e+1", (11.2356), (4));
+testToExponentialP("1.1236e-4", (0.000112356), (4));
+testToExponentialP("-1.1236e-4", (-0.000112356), (4));
+
+// ----------------------------------------------------------------------
+// toPrecision
+function testToPrecision(a, b, c) {
+ assertEquals(a, b.toPrecision(c));
+}
+
+testToPrecision("NaN", (NaN), (1));
+testToPrecision("Infinity", (Infinity), (2));
+testToPrecision("-Infinity", (-Infinity), (2));
+testToPrecision("0.000555000000000000", (0.000555), (15));
+testToPrecision("5.55000000000000e-7", (0.000000555), (15));
+testToPrecision("-5.55000000000000e-7", (-0.000000555), (15));
+testToPrecision("1e+8", (123456789), (1));
+testToPrecision("123456789", (123456789), (9));
+testToPrecision("1.2345679e+8", (123456789), (8));
+testToPrecision("1.234568e+8", (123456789), (7));
+testToPrecision("-1.234568e+8", (-123456789), (7));
+testToPrecision("-1.2e-9", Number(-.0000000012345), (2));
+testToPrecision("-1.2e-8", Number(-.000000012345), (2));
+testToPrecision("-1.2e-7", Number(-.00000012345), (2));
+testToPrecision("-0.0000012", Number(-.0000012345), (2));
+testToPrecision("-0.000012", Number(-.000012345), (2));
+testToPrecision("-0.00012", Number(-.00012345), (2));
+testToPrecision("-0.0012", Number(-.0012345), (2));
+testToPrecision("-0.012", Number(-.012345), (2));
+testToPrecision("-0.12", Number(-.12345), (2));
+testToPrecision("-1.2", Number(-1.2345), (2));
+testToPrecision("-12", Number(-12.345), (2));
+testToPrecision("-1.2e+2", Number(-123.45), (2));
+testToPrecision("-1.2e+3", Number(-1234.5), (2));
+testToPrecision("-1.2e+4", Number(-12345), (2));
+testToPrecision("-1.235e+4", Number(-12345.67), (4));
+testToPrecision("-1.234e+4", Number(-12344.67), (4));
+// Test that we round up even when the last digit generated is even.
+// dtoa does not do this in its original form.
+assertEquals("1.3", 1.25.toPrecision(2), "1.25.toPrecision(2)");
+assertEquals("1.4", 1.35.toPrecision(2), "1.35.toPrecision(2)");
+
+
+
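Editor's note on the two rounding assertions above: ES5 §15.7.4.7 picks the larger candidate when the scaled digit string is exactly halfway between two results, so ties round up rather than to even. 1.25 is exactly representable in binary, making it a true tie; 1.35 is not, its double value sits slightly above 1.35, so it rounds up regardless. A quick standalone check, runnable in any engine:

    console.log((1.25).toPrecision(2));      // "1.3" -- exact binary tie, rounds up
    console.log((1.35).toPrecision(2));      // "1.4" -- stored double is slightly above 1.35
    console.log((11.2356).toExponential(4)); // "1.1236e+1", matching the tests above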
diff --git a/deps/v8/test/mjsunit/opt-elements-kind.js b/deps/v8/test/mjsunit/opt-elements-kind.js
index 83ad702c2d..fe6b8b9bfb 100644
--- a/deps/v8/test/mjsunit/opt-elements-kind.js
+++ b/deps/v8/test/mjsunit/opt-elements-kind.js
@@ -40,6 +40,11 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
+// Turn GC stress mode off. Needed because AllocationMementos only
+// live for one GC, so a GC that happens in certain fragile areas of the test
+// can break assumptions.
+%SetFlags("--gc-interval=-1")
+
support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
if (support_smi_only_arrays) {
diff --git a/deps/v8/test/mjsunit/osr-elements-kind.js b/deps/v8/test/mjsunit/osr-elements-kind.js
index 6d3c8176af..8d43377321 100644
--- a/deps/v8/test/mjsunit/osr-elements-kind.js
+++ b/deps/v8/test/mjsunit/osr-elements-kind.js
@@ -40,6 +40,11 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
+// Turn GC stress mode off. Needed because AllocationMementos only
+// live for one GC, so a GC that happens in certain fragile areas of the test
+// can break assumptions.
+%SetFlags("--gc-interval=-1")
+
support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
if (support_smi_only_arrays) {
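Editor's note: both hunks above add the same guard. In mjsunit tests, runtime flags can be flipped from script via the %SetFlags native (d8 run with --allow-natives-syntax); a gc-interval of -1 disables the every-N-allocations stress GC so AllocationMementos, which survive only one collection, are still present when the test inspects elements-kind transitions. The pattern in isolation (a sketch; the array literal is illustrative):

    // Requires d8 with --allow-natives-syntax.
    %SetFlags("--gc-interval=-1");  // stop stress GCs from firing mid-test
    var a = new Array(1, 2, 3);     // allocation site may plant an AllocationMemento
    a[0] = 1.5;                     // transition recorded while the memento is alive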
diff --git a/deps/v8/test/mjsunit/regexp-global.js b/deps/v8/test/mjsunit/regexp-global.js
index 093dba17c1..8501699458 100644
--- a/deps/v8/test/mjsunit/regexp-global.js
+++ b/deps/v8/test/mjsunit/regexp-global.js
@@ -214,7 +214,7 @@ function test_match(result_expectation,
// Test for different number of matches.
-for (var m = 0; m < 200; m++) {
+for (var m = 0; m < 33; m++) {
// Create string that matches m times.
var subject = "";
var test_1_expectation = "";
diff --git a/deps/v8/test/mjsunit/regress/regress-1713.js b/deps/v8/test/mjsunit/regress/regress-1713.js
deleted file mode 100644
index 0af1144a15..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1713.js
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax --always-compact --expose-gc
-
-var O = { get f() { return 0; } };
-
-var CODE = [];
-
-var R = [];
-
-function Allocate4Kb(N) {
- var arr = [];
- do {arr.push(new Array(1024));} while (--N > 0);
- return arr;
-}
-
-function AllocateXMb(X) {
- return Allocate4Kb((1024 * X) / 4);
-}
-
-function Node(v, next) { this.v = v; this.next = next; }
-
-Node.prototype.execute = function (O) {
- var n = this;
- while (n.next !== null) n = n.next;
- n.v(O);
-};
-
-function LongList(N, x) {
- if (N == 0) return new Node(x, null);
- return new Node(new Array(1024), LongList(N - 1, x));
-}
-
-var L = LongList(1024, function (O) {
- for (var i = 0; i < 5; i++) O.f;
-});
-
-
-
-function Incremental(O, x) {
- if (!x) {
- return;
- }
- function CreateCode(i) {
- var f = new Function("return O.f_" + i);
- CODE.push(f);
- f(); // compile
- f(); // compile
- f(); // compile
- }
-
- for (var i = 0; i < 1e4; i++) CreateCode(i);
- gc();
- gc();
- gc();
-
- print(">>> 1 <<<");
-
- L.execute(O);
-
- try {} catch (e) {}
-
- L = null;
- print(">>> 2 <<<");
- AllocateXMb(8);
- //rint("1");
- //llocateXMb(8);
- //rint("1");
- //llocateXMb(8);
-
-}
-
-function foo(O, x) {
- Incremental(O, x);
-
- print('f');
-
- for (var i = 0; i < 5; i++) O.f;
-
-
- print('g');
-
- bar(x);
-}
-
-function bar(x) {
- if (!x) return;
- %DeoptimizeFunction(foo);
- AllocateXMb(8);
- AllocateXMb(8);
-}
-
-var O1 = {};
-var O2 = {};
-var O3 = {};
-var O4 = {f:0};
-
-foo(O1, false);
-foo(O2, false);
-foo(O3, false);
-%OptimizeFunctionOnNextCall(foo);
-foo(O4, true);
diff --git a/deps/v8/test/mjsunit/regress/regress-1713b.js b/deps/v8/test/mjsunit/regress/regress-1713b.js
deleted file mode 100644
index cc16bf5119..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1713b.js
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax --always-compact --expose-gc
-
-var O = { get f() { return 0; } };
-
-var CODE = [];
-
-var R = [];
-
-function Allocate4Kb(N) {
- var arr = [];
- do {arr.push(new Array(1024));} while (--N > 0);
- return arr;
-}
-
-function AllocateXMb(X) {
- return Allocate4Kb((1024 * X) / 4);
-}
-
-function Node(v, next) { this.v = v; this.next = next; }
-
-Node.prototype.execute = function (O) {
- var n = this;
- while (n.next !== null) n = n.next;
- n.v(O);
-};
-
-function LongList(N, x) {
- if (N == 0) return new Node(x, null);
- return new Node(new Array(1024), LongList(N - 1, x));
-}
-
-var L = LongList(1024, function (O) {
- for (var i = 0; i < 5; i++) O.f;
-});
-
-
-
-%NeverOptimizeFunction(Incremental);
-function Incremental(O, x) {
- if (!x) {
- return;
- }
- function CreateCode(i) {
- var f = new Function("return O.f_" + i);
- CODE.push(f);
- f(); // compile
- f(); // compile
- f(); // compile
- }
-
- for (var i = 0; i < 1e4; i++) CreateCode(i);
- gc();
- gc();
- gc();
-
- print(">>> 1 <<<");
-
- L.execute(O);
-
- L = null;
- print(">>> 2 <<<");
- AllocateXMb(8);
- //rint("1");
- //llocateXMb(8);
- //rint("1");
- //llocateXMb(8);
-
-}
-
-function foo(O, x) {
- Incremental(O, x);
-
- print('f');
-
- for (var i = 0; i < 5; i++) O.f;
-
-
- print('g');
-
- bar(x);
-}
-
-function bar(x) {
- if (!x) return;
- %DeoptimizeFunction(foo);
- AllocateXMb(8);
- AllocateXMb(8);
-}
-
-var O1 = {};
-var O2 = {};
-var O3 = {};
-var O4 = {f:0};
-
-foo(O1, false);
-foo(O2, false);
-foo(O3, false);
-%OptimizeFunctionOnNextCall(foo);
-foo(O4, true);
diff --git a/deps/v8/test/mjsunit/regress/regress-2612.js b/deps/v8/test/mjsunit/regress/regress-2612.js
index 06db07733d..ac6028f14c 100644
--- a/deps/v8/test/mjsunit/regress/regress-2612.js
+++ b/deps/v8/test/mjsunit/regress/regress-2612.js
@@ -57,11 +57,11 @@ function varname(i) {
var source = "var ";
-for (var i = 0; i < 1000; i++) {
+for (var i = 0; i < 750; i++) {
source += [varname(i), "=", rand(), ","].join("");
}
-for (var i = 1000; i < 100000; i++) {
+for (var i = 750; i < 3000; i++) {
source += [varname(i), "=",
varname(randi(i)), "+",
varname(randi(i)), ","].join("");
@@ -73,4 +73,3 @@ var f = new Function(source);
f();
%OptimizeFunctionOnNextCall(f);
f();
-
diff --git a/deps/v8/test/mjsunit/regress/regress-2931.js b/deps/v8/test/mjsunit/regress/regress-2931.js
new file mode 100644
index 0000000000..a2ea912682
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2931.js
@@ -0,0 +1,34 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Typed array constructors should be immune to changes to the
+// value of ArrayBuffer on the global object.
+// See http://code.google.com/p/v8/issues/detail?id=294
+
+this.ArrayBuffer = function() { throw Error('BAM'); };
+var u8 = new Uint8Array(100);
+assertSame(100, u8.byteLength);
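Editor's note: the shape of the bug this guards against is sketched below (RealArrayBuffer is an illustrative name, not part of the test). The typed-array constructor must allocate its backing store through the internal ArrayBuffer, not through whatever the global binding currently holds:

    var RealArrayBuffer = ArrayBuffer;           // keep a reference for checking
    this.ArrayBuffer = function() { throw Error('BAM'); };
    var u8 = new Uint8Array(8);                  // must not invoke the override
    console.log(u8.buffer instanceof RealArrayBuffer);  // true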
diff --git a/deps/v8/test/mjsunit/regress/regress-add-minus-zero.js b/deps/v8/test/mjsunit/regress/regress-add-minus-zero.js
new file mode 100644
index 0000000000..0b4af75424
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-add-minus-zero.js
@@ -0,0 +1,38 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var o = { a: 0 };
+
+function f(x) { return -o.a + 0; };
+
+assertEquals("Infinity", String(1/f()));
+assertEquals("Infinity", String(1/f()));
+%OptimizeFunctionOnNextCall(f);
+assertEquals("Infinity", String(1/f()));
+
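Editor's note: the 1/f() trick makes the sign of zero observable. With o.a equal to 0 the body computes (-0) + 0, which IEEE-754 defines as +0, so the division yields +Infinity; a constant fold that dropped the "+ 0" would leave -0 and flip the sign, which is exactly what this regression test pins down:

    console.log(1 / (-0 + 0));  // Infinity: (-0) + (+0) is +0
    console.log(1 / -0);        // -Infinity: what a bad fold to just -o.a would produce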
diff --git a/deps/v8/test/mjsunit/regress/regress-array-pop-nonconfigurable.js b/deps/v8/test/mjsunit/regress/regress-array-pop-nonconfigurable.js
new file mode 100644
index 0000000000..129e1980a4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-array-pop-nonconfigurable.js
@@ -0,0 +1,31 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var a = [];
+Object.defineProperty(a, 0, {});
+assertThrows(function() { a.pop(); });
+
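Editor's note: Object.defineProperty(a, 0, {}) creates element 0 with all attributes defaulting to false, so it is non-configurable. Array.prototype.pop must delete the last index before shrinking length, and [[Delete]] on a non-configurable property fails, which pop surfaces as a TypeError:

    var a = [];
    Object.defineProperty(a, 0, {});   // all attributes default to false
    // [[Delete]] on the non-configurable element fails; pop reports a TypeError.
    try { a.pop(); } catch (e) { console.log(e instanceof TypeError); }  // true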
diff --git a/deps/v8/test/mjsunit/regress/regress-binop-nosse2.js b/deps/v8/test/mjsunit/regress/regress-binop-nosse2.js
new file mode 100644
index 0000000000..c6cbaf7ebf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-binop-nosse2.js
@@ -0,0 +1,168 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noenable-sse2
+
+// general tests
+var e31 = Math.pow(2, 31);
+
+assertEquals(-e31, -1*e31);
+assertEquals(e31, -1*e31*(-1));
+assertEquals(e31, -1*-e31);
+assertEquals(e31, -e31*(-1));
+
+var x = {toString : function() {return 1}}
+function add(a,b){return a+b;}
+add(1,x);
+add(1,x);
+%OptimizeFunctionOnNextCall(add);
+add(1,x);
+x.toString = function() {return "2"};
+
+assertEquals(add(1,x), "12");
+
+// Test the correct placement of the simulates in TruncateToNumber:
+function Checker() {
+ this.str = "1";
+ var toStringCalled = 0;
+ var toStringExpected = 0;
+ this.toString = function() {
+ toStringCalled++;
+ return this.str;
+ };
+ this.check = function() {
+ toStringExpected++;
+ assertEquals(toStringExpected, toStringCalled);
+ };
+};
+var left = new Checker();
+var right = new Checker();
+
+function test(fun,check_fun,a,b,does_throw) {
+ left.str = a;
+ right.str = b;
+ try {
+ assertEquals(check_fun(a,b), fun(left, right));
+ assertTrue(!does_throw);
+ } catch(e) {
+ if (e instanceof TypeError) {
+ assertTrue(!!does_throw);
+ } else {
+ throw e;
+ }
+ } finally {
+ left.check();
+ if (!does_throw || does_throw>1) {
+ right.check();
+ }
+ }
+}
+
+function minus(a,b) { return a-b };
+function check_minus(a,b) { return a-b };
+function mod(a,b) { return a%b };
+function check_mod(a,b) { return a%b };
+
+test(minus,check_minus,1,2);
+// Bailout on left
+test(minus,check_minus,1<<30,1);
+// Bailout on right
+test(minus,check_minus,1,1<<30);
+// Bailout on result
+test(minus,check_minus,1<<30,-(1<<30));
+
+// Some more interesting things
+test(minus,check_minus,1,1.4);
+test(minus,check_minus,1.3,4);
+test(minus,check_minus,1.3,1.4);
+test(minus,check_minus,1,2);
+test(minus,check_minus,1,undefined);
+test(minus,check_minus,1,2);
+test(minus,check_minus,1,true);
+test(minus,check_minus,1,2);
+test(minus,check_minus,1,null);
+test(minus,check_minus,1,2);
+test(minus,check_minus,1,"");
+test(minus,check_minus,1,2);
+
+// Throw on left
+test(minus,check_minus,{},1,1);
+// Throw on right
+test(minus,check_minus,1,{},2);
+// Throw both
+test(minus,check_minus,{},{},1);
+
+test(minus,check_minus,1,2);
+
+// Now with optimized code
+test(mod,check_mod,1,2);
+%OptimizeFunctionOnNextCall(mod);
+test(mod,check_mod,1,2);
+
+test(mod,check_mod,1<<30,1);
+%OptimizeFunctionOnNextCall(mod);
+test(mod,check_mod,1<<30,1);
+test(mod,check_mod,1,1<<30);
+%OptimizeFunctionOnNextCall(mod);
+test(mod,check_mod,1,1<<30);
+test(mod,check_mod,1<<30,-(1<<30));
+%OptimizeFunctionOnNextCall(mod);
+test(mod,check_mod,1<<30,-(1<<30));
+
+test(mod,check_mod,1,{},2);
+%OptimizeFunctionOnNextCall(mod);
+test(mod,check_mod,1,{},2);
+
+test(mod,check_mod,1,2);
+
+
+// test oddballs
+function t1(a, b) {return a-b}
+assertEquals(t1(1,2), 1-2);
+assertEquals(t1(2,true), 2-1);
+assertEquals(t1(false,2), 0-2);
+assertEquals(t1(1,2.4), 1-2.4);
+assertEquals(t1(1.3,2.4), 1.3-2.4);
+assertEquals(t1(true,2.4), 1-2.4);
+assertEquals(t1(1,undefined), 1-NaN);
+assertEquals(t1(1,1<<30), 1-(1<<30));
+assertEquals(t1(1,2), 1-2);
+
+function t2(a, b) {return a/b}
+assertEquals(t2(1,2), 1/2);
+assertEquals(t2(null,2), 0/2);
+assertEquals(t2(null,-2), 0/-2);
+assertEquals(t2(2,null), 2/0);
+assertEquals(t2(-2,null), -2/0);
+assertEquals(t2(1,2.4), 1/2.4);
+assertEquals(t2(1.3,2.4), 1.3/2.4);
+assertEquals(t2(null,2.4), 0/2.4);
+assertEquals(t2(1.3,null), 1.3/0);
+assertEquals(t2(undefined,2), NaN/2);
+assertEquals(t2(1,1<<30), 1/(1<<30));
+assertEquals(t2(1,2), 1/2);
+
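Editor's note: the Checker objects above count observable toString calls so the harness can assert that every bailout and deopt path converts each operand exactly once; re-running a user conversion after a bailout would be an observable bug. The core of the pattern, reduced to a standalone sketch:

    var calls = 0;
    var operand = { toString: function() { calls++; return "3"; } };
    var result = operand - 1;  // ToNumber runs the user toString once
    console.log(result);       // 2
    console.log(calls);        // 1 -- must stay 1 on every compilation tier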
diff --git a/deps/v8/test/mjsunit/regress/regress-binop.js b/deps/v8/test/mjsunit/regress/regress-binop.js
new file mode 100644
index 0000000000..7a8b41924d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-binop.js
@@ -0,0 +1,181 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// general tests
+var e31 = Math.pow(2, 31);
+
+assertEquals(-e31, -1*e31);
+assertEquals(e31, -1*e31*(-1));
+assertEquals(e31, -1*-e31);
+assertEquals(e31, -e31*(-1));
+
+var x = {toString : function() {return 1}}
+function add(a,b){return a+b;}
+add(1,x);
+add(1,x);
+%OptimizeFunctionOnNextCall(add);
+add(1,x);
+x.toString = function() {return "2"};
+
+assertEquals(add(1,x), "12");
+
+// Test the correct placement of the simulates in TruncateToNumber:
+function Checker() {
+ this.str = "1";
+ var toStringCalled = 0;
+ var toStringExpected = 0;
+ this.toString = function() {
+ toStringCalled++;
+ return this.str;
+ };
+ this.check = function() {
+ toStringExpected++;
+ assertEquals(toStringExpected, toStringCalled);
+ };
+};
+var left = new Checker();
+var right = new Checker();
+
+function test(fun,check_fun,a,b,does_throw) {
+ left.str = a;
+ right.str = b;
+ try {
+ assertEquals(check_fun(a,b), fun(left, right));
+ assertTrue(!does_throw);
+ } catch(e) {
+ if (e instanceof TypeError) {
+ assertTrue(!!does_throw);
+ } else {
+ throw e;
+ }
+ } finally {
+ left.check();
+ if (!does_throw || does_throw>1) {
+ right.check();
+ }
+ }
+}
+
+function minus(a,b) { return a-b };
+function check_minus(a,b) { return a-b };
+function mod(a,b) { return a%b };
+function check_mod(a,b) { return a%b };
+
+test(minus,check_minus,1,2);
+// Bailout on left
+test(minus,check_minus,1<<30,1);
+// Bailout on right
+test(minus,check_minus,1,1<<30);
+// Bailout on result
+test(minus,check_minus,1<<30,-(1<<30));
+
+// Some more interesting things
+test(minus,check_minus,1,1.4);
+test(minus,check_minus,1.3,4);
+test(minus,check_minus,1.3,1.4);
+test(minus,check_minus,1,2);
+test(minus,check_minus,1,undefined);
+test(minus,check_minus,1,2);
+test(minus,check_minus,1,true);
+test(minus,check_minus,1,2);
+test(minus,check_minus,1,null);
+test(minus,check_minus,1,2);
+test(minus,check_minus,1,"");
+test(minus,check_minus,1,2);
+
+// Throw on left
+test(minus,check_minus,{},1,1);
+// Throw on right
+test(minus,check_minus,1,{},2);
+// Throw both
+test(minus,check_minus,{},{},1);
+
+test(minus,check_minus,1,2);
+
+// Now with optimized code
+test(mod,check_mod,1,2);
+%OptimizeFunctionOnNextCall(mod);
+test(mod,check_mod,1,2);
+
+test(mod,check_mod,1<<30,1);
+%OptimizeFunctionOnNextCall(mod);
+test(mod,check_mod,1<<30,1);
+test(mod,check_mod,1,1<<30);
+%OptimizeFunctionOnNextCall(mod);
+test(mod,check_mod,1,1<<30);
+test(mod,check_mod,1<<30,-(1<<30));
+%OptimizeFunctionOnNextCall(mod);
+test(mod,check_mod,1<<30,-(1<<30));
+
+test(mod,check_mod,1,{},2);
+%OptimizeFunctionOnNextCall(mod);
+test(mod,check_mod,1,{},2);
+
+test(mod,check_mod,1,2);
+
+
+// test oddballs
+function t1(a, b) {return a-b}
+assertEquals(t1(1,2), 1-2);
+assertEquals(t1(2,true), 2-1);
+assertEquals(t1(false,2), 0-2);
+assertEquals(t1(1,2.4), 1-2.4);
+assertEquals(t1(1.3,2.4), 1.3-2.4);
+assertEquals(t1(true,2.4), 1-2.4);
+assertEquals(t1(1,undefined), 1-NaN);
+assertEquals(t1(1,1<<30), 1-(1<<30));
+assertEquals(t1(1,2), 1-2);
+
+function t2(a, b) {return a/b}
+assertEquals(t2(1,2), 1/2);
+assertEquals(t2(null,2), 0/2);
+assertEquals(t2(null,-2), 0/-2);
+assertEquals(t2(2,null), 2/0);
+assertEquals(t2(-2,null), -2/0);
+assertEquals(t2(1,2.4), 1/2.4);
+assertEquals(t2(1.3,2.4), 1.3/2.4);
+assertEquals(t2(null,2.4), 0/2.4);
+assertEquals(t2(1.3,null), 1.3/0);
+assertEquals(t2(undefined,2), NaN/2);
+assertEquals(t2(1,1<<30), 1/(1<<30));
+assertEquals(t2(1,2), 1/2);
+
+
+// Assert that the hole is not truncated to NaN for string add.
+function string_add(a,i) {
+ var d = [0.1, ,0.3];
+ return a + d[i];
+}
+
+string_add(1.1, 0);
+string_add("", 0);
+%OptimizeFunctionOnNextCall(string_add);
+string_add(1.1, 0);
+// There comes the hole
+assertEquals("undefined", string_add("", 1));
diff --git a/deps/v8/test/mjsunit/regress/regress-compare-constant-doubles.js b/deps/v8/test/mjsunit/regress/regress-compare-constant-doubles.js
new file mode 100644
index 0000000000..0f8ffe307d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-compare-constant-doubles.js
@@ -0,0 +1,58 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var left = 1.5;
+var right;
+
+var keepalive;
+
+function foo() {
+ // Fill XMM registers with cruft.
+ var a1 = Math.sin(1) + 10;
+ var a2 = a1 + 1;
+ var a3 = a2 + 1;
+ var a4 = a3 + 1;
+ var a5 = a4 + 1;
+ var a6 = a5 + 1;
+ keepalive = [a1, a2, a3, a4, a5, a6];
+
+ // Actual test.
+ if (left < right) return "ok";
+ return "bad";
+}
+
+function prepare(base) {
+ right = 0.5 * base;
+}
+
+prepare(21);
+assertEquals("ok", foo());
+assertEquals("ok", foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("ok", foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-305309.js b/deps/v8/test/mjsunit/regress/regress-crbug-305309.js
new file mode 100644
index 0000000000..cd89bedc11
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-305309.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function BadProto() {
+ this.constant_function = function() {};
+ this.one = 1;
+ this.two = 2;
+}
+var b1 = new BadProto();
+var b2 = new BadProto();
+
+function Ctor() {}
+Ctor.prototype = b1;
+var a = new Ctor();
+
+function Two(x) {
+ return x.two;
+}
+assertEquals(2, Two(a));
+assertEquals(2, Two(a));
+b2.constant_function = "no longer constant!";
+%OptimizeFunctionOnNextCall(Two);
+assertEquals(2, Two(a));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-306851.js b/deps/v8/test/mjsunit/regress/regress-crbug-306851.js
new file mode 100644
index 0000000000..77b711a656
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-306851.js
@@ -0,0 +1,52 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function Counter() {
+ this.value = 0;
+};
+
+Object.defineProperty(Counter.prototype, 'count', {
+ get: function() { return this.value; },
+ set: function(value) { this.value = value; }
+});
+
+var obj = new Counter();
+
+function bummer() {
+ obj.count++;
+ return obj.count;
+}
+
+assertEquals(1, bummer());
+assertEquals(2, bummer());
+assertEquals(3, bummer());
+%OptimizeFunctionOnNextCall(bummer);
+assertEquals(4, bummer());
+assertEquals(5, bummer());
+assertEquals(6, bummer());
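Editor's note: obj.count++ exercises both halves of the accessor pair: it desugars to a [[Get]] of count, ToNumber, an add, then a [[Set]] of the incremented value, so optimized code must keep calling both user functions. This is observable in plain JS:

    var log = [];
    var o = {
      get x() { log.push("get"); return 1; },
      set x(v) { log.push("set:" + v); }
    };
    o.x++;
    console.log(log.join(","));  // "get,set:2"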
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-84186.js b/deps/v8/test/mjsunit/regress/regress-crbug-309623.js
index 865bf9eb91..12473c7947 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-84186.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-309623.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,17 +25,22 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Test that the expected string is parsed in the json parser when the length
-// is so big that the string can't fit in new space, and it includes special
-// characters.
+// Flags: --allow-natives-syntax
-var json = '{"key":"';
-var key = '';
-var expected = '';
-for(var i = 0; i < 60000; i++) {
- key = key + "TESTING" + i + "\\n";
- expected = expected + "TESTING" + i + "\n";
+var u = new Uint32Array(2);
+u[0] = 1;
+u[1] = 0xEE6B2800;
+
+var a = [0, 1, 2];
+a[0] = 0; // Kill the COW.
+assertTrue(%HasFastSmiElements(a));
+
+function foo(i) {
+ a[0] = u[i];
+ return a[0];
}
-json = json + key + '"}';
-var out = JSON.parse(json);
-assertEquals(expected, out.key);
+
+assertEquals(u[0], foo(0));
+assertEquals(u[0], foo(0));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(u[1], foo(1));
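Editor's note: 0xEE6B2800 is 4,000,000,000, which fits in a Uint32Array element but not in a Smi (31-bit payload on 32-bit builds), so the store a[0] = u[1] must transition the array out of fast-smi elements; the assertions check that the optimized store performs that transition instead of truncating. The magnitude involved:

    console.log(0xEE6B2800);      // 4000000000
    console.log(0xEE6B2800 | 0);  // -294967296: what a 32-bit truncation looks like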
diff --git a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
index 58a0b1c869..b08a94257c 100644
--- a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
+++ b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
@@ -27,7 +27,7 @@
// Flags: --fold-constants --nodead-code-elimination
// Flags: --expose-gc --allow-natives-syntax
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=300
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
@@ -39,12 +39,14 @@ function test(fun) {
fun();
// Mark for concurrent optimization.
%OptimizeFunctionOnNextCall(fun, "concurrent");
- //Trigger optimization in the background.
+ // Kick off recompilation.
fun();
- //Tenure cons string.
+ // Tenure cons string after compile graph has been created.
gc();
- // In the mean time, concurrent recompiling is not complete yet.
+ // In the mean time, concurrent recompiling is still blocked.
assertUnoptimized(fun, "no sync");
+ // Let concurrent recompilation proceed.
+ %UnblockConcurrentRecompilation();
// Concurrent recompilation eventually finishes, embeds tenured cons string.
assertOptimized(fun, "sync");
// Visit embedded cons string during mark compact.
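Editor's note: the same flag swap appears in several tests below: --block-concurrent-recompilation replaces the timing-based --concurrent-recompilation-delay so the background compile job is held deterministically until the test releases it. The skeleton of the pattern, using the same mjsunit natives and helpers as the tests (a sketch; `hot` is an illustrative name):

    // Requires d8 with --allow-natives-syntax --concurrent-recompilation
    // --block-concurrent-recompilation.
    function hot() { return 1; }
    hot(); hot();
    %OptimizeFunctionOnNextCall(hot, "concurrent");
    hot();                              // queues the compile job; it stays blocked
    assertUnoptimized(hot, "no sync");  // deterministic: the job cannot finish yet
    %UnblockConcurrentRecompilation();  // release the background thread
    assertOptimized(hot, "sync");       // syncs with the thread, code is installed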
diff --git a/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js b/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js
index 8bf95ec5aa..c637be5497 100644
--- a/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js
+++ b/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=100
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
@@ -60,8 +60,14 @@ f();
%OptimizeFunctionOnNextCall(f, "concurrent"); // Mark with builtin.
f(); // Kick off concurrent recompilation.
+// After compile graph has been created...
Debug.setListener(listener); // Activate debugger.
Debug.setBreakPoint(f, 2, 0); // Force deopt.
+
+// At this point, concurrent recompilation is still being blocked.
+assertUnoptimized(f, "no sync");
+// Let concurrent recompilation proceed.
+%UnblockConcurrentRecompilation();
// Sync with optimization thread. But no optimized code is installed.
assertUnoptimized(f, "sync");
diff --git a/deps/v8/test/mjsunit/regress/regress-parse-object-literal.js b/deps/v8/test/mjsunit/regress/regress-parse-object-literal.js
new file mode 100644
index 0000000000..96d63c2c12
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-parse-object-literal.js
@@ -0,0 +1,29 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Should throw, not crash.
+assertThrows("var o = { get /*space*/ () {} }");
diff --git a/deps/v8/test/mjsunit/regress/regress-parse-use-strict.js b/deps/v8/test/mjsunit/regress/regress-parse-use-strict.js
new file mode 100644
index 0000000000..9dd0f4c97c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-parse-use-strict.js
@@ -0,0 +1,42 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Filler long enough to trigger lazy parsing.
+var filler = "/*" + new Array(1024).join('x') + "*/";
+
+// Snippet trying to switch to strict mode.
+var strict = '"use strict"; with({}) {}';
+
+// Test switching to strict mode after string literal.
+assertThrows('function f() { "use sanity";' + strict + '}');
+assertThrows('function f() { "use sanity";' + strict + filler + '}');
+
+// Test switching to strict mode after function declaration.
+// We must use eval instead of assertDoesNotThrow here to make sure that
+// lazy parsing is triggered. Otherwise the bug won't reproduce.
+eval('function f() { function g() {}' + strict + '}');
+eval('function f() { function g() {}' + strict + filler + '}');
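Editor's note: both cases hinge on the directive-prologue rule: "use strict" acts as a directive only within the initial run of string-literal expression statements. After another string literal it is still in the prologue, so the strict-mode `with` must throw; after a function declaration the prologue is over, the string is an ordinary expression, and `with` stays legal. The filler merely forces the lazy-parsing path where the bug lived. In isolation:

    // The prologue ended at the declaration, so this must parse:
    function f() {
      function g() {}
      "use strict";  // plain expression statement, not a directive
      with ({}) {}   // legal: f is still sloppy mode
    }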
diff --git a/deps/v8/src/v8preparserdll-main.cc b/deps/v8/test/mjsunit/regress/regress-polymorphic-load.js
index c0344d344a..2545e85f60 100644
--- a/deps/v8/src/v8preparserdll-main.cc
+++ b/deps/v8/test/mjsunit/regress/regress-polymorphic-load.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,15 +25,19 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <windows.h>
+// Flags: --allow-natives-syntax
-#include "../include/v8-preparser.h"
-
-extern "C" {
-BOOL WINAPI DllMain(HANDLE hinstDLL,
- DWORD dwReason,
- LPVOID lpvReserved) {
- // Do nothing.
- return TRUE;
-}
+function f(o) {
+ return o.x;
}
+
+var o1 = {x:1};
+var o2 = {__proto__: {x:2}};
+
+f(o2);
+f(o2);
+f(o2);
+f(o1);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1, f(o1));
+assertEquals(2, f(o2));
diff --git a/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js b/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js
index 2fad5ca0d2..a9c20ec844 100644
--- a/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js
+++ b/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
-// Flags: --concurrent-recompilation-delay=300
+// Flags: --block-concurrent-recompilation
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
@@ -46,17 +46,22 @@ function bar() {
}
foo();
-// Mark and trigger concurrent optimization.
+// Mark and kick off recompilation.
%OptimizeFunctionOnNextCall(foo, "concurrent");
foo();
// Set break points on an unrelated function. This clears both optimized
// and (shared) unoptimized code on foo, and sets both to lazy-compile builtin.
// Clear the break point immediately after to deactivate the debugger.
+// Do all of this after compile graph has been created.
Debug.setBreakPoint(bar, 0, 0);
Debug.clearAllBreakPoints();
+// At this point, concurrent recompilation is still blocked.
+assertUnoptimized(foo, "no sync");
+// Let concurrent recompilation proceed.
+%UnblockConcurrentRecompilation();
+
// Install optimized code when concurrent optimization finishes.
// This needs to be able to deal with shared code being a builtin.
assertUnoptimized(foo, "sync");
-
diff --git a/deps/v8/test/mjsunit/unbox-double-arrays.js b/deps/v8/test/mjsunit/unbox-double-arrays.js
index 4e8718eb3f..5ed404025f 100644
--- a/deps/v8/test/mjsunit/unbox-double-arrays.js
+++ b/deps/v8/test/mjsunit/unbox-double-arrays.js
@@ -345,8 +345,6 @@ function testOneArrayType(allocator) {
-Infinity,
expected_array_value(7));
- assertOptimized(test_various_stores);
-
// Make sure that we haven't converted from fast double.
assertTrue(%HasFastDoubleElements(large_array));
}
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 9878730b2c..b27e991b98 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -42,807 +42,816 @@
# debugging.
# --------------------------------------------------------------------
-prefix mozilla
-def FAIL_OK = FAIL, OKAY
+[
+[ALWAYS, {
+ ##################### NEEDS INVESTIGATION ##############
+
+ # BUG(2893): These tests started to fail after i18n support was turned on.
+ # Need to investigate why.
+ 'ecma_3/Number/15.7.4.3-02': [PASS, FAIL],
+ 'ecma_3/Date/15.9.5.5-02': [PASS, FAIL],
+
+ ##################### SKIPPED TESTS #####################
+
+ # This test checks that we behave properly in an out-of-memory
+ # situation. The test fails in V8 with an exception and takes a long
+ # time to do so.
+ 'js1_5/Regress/regress-271716-n': [SKIP],
+
+ # BUG(960): This test has an insane amount of output when it times out,
+ # messing up ability to see other failures on the waterfall.
+ 'js1_5/extensions/regress-342960': [SKIP],
+
+  # This test uses an uninitialized variable. A bug has been filed:
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=575575
+ 'js1_5/Array/regress-465980-02': [SKIP],
+
+ # These tests are simply wrong (i.e., they do not test what they intend
+ # to test).
+ # In particular, these two compare numbers to NaN with != in the current
+ # version of the Mozilla tests. This is *fixed* in a later version.
+ # The tests should be re-enabled when switching to a new version.
+ 'ecma_3/Date/15.9.3.2-1': [SKIP],
+ 'js1_2/function/Number': [SKIP],
+
+ # TODO(2018): Temporarily allow timeout in debug mode.
+ 'js1_5/GC/regress-203278-2': [PASS, ['mode == debug', TIMEOUT, FAIL]],
+
+ ##################### SLOW TESTS #####################
+
+ # This takes a long time to run (~100 seconds). It should only be run
+ # by the really patient.
+ 'js1_5/GC/regress-324278': [SLOW],
+
+ # This takes a long time to run because our indexOf operation is
+ # pretty slow - it causes a lot of GCs; see issue
+ # #926379. We could consider marking this SKIP because it takes a
+ # while to run to completion.
+ 'js1_5/GC/regress-338653': [SLOW],
+
+ # This test is designed to run until it runs out of memory. This takes
+ # a very long time because it builds strings character by character
+ # and compiles a lot of regular expressions. We could consider marking
+ # this SKIP because it takes a while to run to completion.
+ 'js1_5/GC/regress-346794': [SLOW],
+
+ # Runs out of memory while trying to build huge string of 'x'
+ # characters. This takes a long time to run (~32 seconds).
+ 'js1_5/GC/regress-348532': [SLOW],
+
+
+ ##################### FLAKY TESTS #####################
+
+ # These tests time out in debug mode but pass in product mode
+ 'js1_5/Regress/regress-360969-03': [PASS, ['mode == debug', TIMEOUT]],
+ 'js1_5/Regress/regress-360969-04': [PASS, ['mode == debug', TIMEOUT]],
+ 'js1_5/Regress/regress-360969-05': [PASS, ['mode == debug', TIMEOUT]],
+ 'js1_5/Regress/regress-360969-06': [PASS, ['mode == debug', TIMEOUT]],
+ 'js1_5/extensions/regress-365527': [PASS, ['mode == debug', TIMEOUT]],
+ 'js1_5/Regress/regress-280769-3': [PASS, ['mode == debug', FAIL]],
+ 'js1_5/Regress/regress-203278-1': [PASS, ['mode == debug', FAIL]],
+ 'js1_5/Regress/regress-244470': [PASS, ['mode == debug', FAIL]],
+ 'ecma_3/RegExp/regress-209067': [PASS, ['mode == debug', FAIL]],
+ 'js1_5/GC/regress-278725': [PASS, ['mode == debug', FAIL]],
+ # http://b/issue?id=1206983
+ 'js1_5/Regress/regress-367561-03': [PASS, ['mode == debug', FAIL]],
+ 'ecma/Date/15.9.5.10-2': [PASS, FAIL, ['mode == debug', TIMEOUT]],
-##################### SKIPPED TESTS #####################
-
-# This test checks that we behave properly in an out-of-memory
-# situation. The test fails in V8 with an exception and takes a long
-# time to do so.
-js1_5/Regress/regress-271716-n: SKIP
-
-# BUG(960): This test has an insane amount of output when it times out,
-# messing up ability to see other failures on the waterfall.
-js1_5/extensions/regress-342960: SKIP
-
-# This test uses a unitialized variable. A Bug has been filed:
-# https://bugzilla.mozilla.org/show_bug.cgi?id=575575
-js1_5/Array/regress-465980-02: SKIP
-
-# These tests are simply wrong (i.e., they do not test what they intend
-# to test).
-# In particular, these two compare numbers to NaN with != in the current
-# version of the Mozilla tests. This is *fixed* in a later version.
-# The tests should be re-enabled when switching to a new version.
-ecma_3/Date/15.9.3.2-1: SKIP
-js1_2/function/Number: SKIP
-
-# TODO(2018): Temporarily allow timeout in debug mode.
-js1_5/GC/regress-203278-2: PASS || (TIMEOUT || FAIL) if $mode == debug
-
-##################### SLOW TESTS #####################
-
-# This takes a long time to run (~100 seconds). It should only be run
-# by the really patient.
-js1_5/GC/regress-324278: SLOW
-
-# This takes a long time to run because our indexOf operation is
-# pretty slow - it causes a lot of GCs; see issue
-# #926379. We could consider marking this SKIP because it takes a
-# while to run to completion.
-js1_5/GC/regress-338653: SLOW
-
-# This test is designed to run until it runs out of memory. This takes
-# a very long time because it builds strings character by character
-# and compiles a lot of regular expressions. We could consider marking
-# this SKIP because it takes a while to run to completion.
-js1_5/GC/regress-346794: SLOW
-
-# Runs out of memory while trying to build huge string of 'x'
-# characters. This takes a long time to run (~32 seconds).
-js1_5/GC/regress-348532: SLOW
+ # These tests create two Date objects just after each other and
+ # expects them to match. Sometimes this happens on the border
+ # between one second and the next.
+ 'ecma/Date/15.9.2.1': [PASS, FAIL],
+ 'ecma/Date/15.9.2.2-1': [PASS, FAIL],
+ 'ecma/Date/15.9.2.2-2': [PASS, FAIL],
+ 'ecma/Date/15.9.2.2-3': [PASS, FAIL],
+ 'ecma/Date/15.9.2.2-4': [PASS, FAIL],
+ 'ecma/Date/15.9.2.2-5': [PASS, FAIL],
+ 'ecma/Date/15.9.2.2-6': [PASS, FAIL],
+ # 1026139: These date tests fail on arm and mips
+ 'ecma/Date/15.9.5.29-1': [PASS, ['arch == arm or arch == mipsel', FAIL]],
+ 'ecma/Date/15.9.5.28-1': [PASS, ['arch == arm or arch == mipsel', FAIL]],
-##################### FLAKY TESTS #####################
+ # 1050186: Arm/MIPS vm is broken; probably unrelated to dates
+ 'ecma/Array/15.4.4.5-3': [PASS, ['arch == arm or arch == mipsel', FAIL]],
+ 'ecma/Date/15.9.5.22-2': [PASS, ['arch == arm or arch == mipsel', FAIL]],
-# These tests time out in debug mode but pass in product mode
-js1_5/Regress/regress-360969-03: PASS || TIMEOUT if $mode == debug
-js1_5/Regress/regress-360969-04: PASS || TIMEOUT if $mode == debug
-js1_5/Regress/regress-360969-05: PASS || TIMEOUT if $mode == debug
-js1_5/Regress/regress-360969-06: PASS || TIMEOUT if $mode == debug
-js1_5/extensions/regress-365527: PASS || TIMEOUT if $mode == debug
+ # Flaky test that fails due to what appears to be a bug in the test.
+  # Occurs depending on the current time.
+ 'ecma/Date/15.9.5.8': [PASS, FAIL],
-js1_5/Regress/regress-280769-3: PASS || FAIL if $mode == debug
-js1_5/Regress/regress-203278-1: PASS || FAIL if $mode == debug
-js1_5/Regress/regress-244470: PASS || FAIL if $mode == debug
-ecma_3/RegExp/regress-209067: PASS || FAIL if $mode == debug
-js1_5/GC/regress-278725: PASS || FAIL if $mode == debug
-# http://b/issue?id=1206983
-js1_5/Regress/regress-367561-03: PASS || FAIL if $mode == debug
-ecma/Date/15.9.5.10-2: PASS || (FAIL || TIMEOUT if $mode == debug)
+ # Severely brain-damaged test. Access to local variables must not
+ # be more than 2.5 times faster than access to global variables? WTF?
+ 'js1_5/Regress/regress-169559': [PASS, FAIL],
-# These tests create two Date objects just after each other and
-# expects them to match. Sometimes this happens on the border
-# between one second and the next.
-ecma/Date/15.9.2.1: PASS || FAIL
-ecma/Date/15.9.2.2-1: PASS || FAIL
-ecma/Date/15.9.2.2-2: PASS || FAIL
-ecma/Date/15.9.2.2-3: PASS || FAIL
-ecma/Date/15.9.2.2-4: PASS || FAIL
-ecma/Date/15.9.2.2-5: PASS || FAIL
-ecma/Date/15.9.2.2-6: PASS || FAIL
-# 1026139: These date tests fail on arm and mips
-ecma/Date/15.9.5.29-1: PASS || FAIL if ($arch == arm || $arch == mipsel)
-ecma/Date/15.9.5.28-1: PASS || FAIL if ($arch == arm || $arch == mipsel)
+  # Test that relies on a specific timezone (not working in Denmark).
+ 'js1_5/Regress/regress-58116': [PASS, FAIL],
-# 1050186: Arm/MIPS vm is broken; probably unrelated to dates
-ecma/Array/15.4.4.5-3: PASS || FAIL if ($arch == arm || $arch == mipsel)
-ecma/Date/15.9.5.22-2: PASS || FAIL if ($arch == arm || $arch == mipsel)
-# Flaky test that fails due to what appears to be a bug in the test.
-# Occurs depending on current time
-ecma/Date/15.9.5.8: PASS || FAIL
+ # Flaky random() test. Tests the distribution of calls to Math.random().
+ 'js1_5/Regress/regress-211590': [PASS, FAIL],
-# Severely brain-damaged test. Access to local variables must not
-# be more than 2.5 times faster than access to global variables? WTF?
-js1_5/Regress/regress-169559: PASS || FAIL
+ # Flaky tests; expect BigO-order computations to yield 1, but the code
+ # cannot handle outliers. See bug #925864.
+ 'ecma_3/RegExp/regress-311414': [PASS, FAIL],
+ 'ecma_3/RegExp/regress-289669': [PASS, FAIL],
+ 'js1_5/String/regress-314890': [PASS, FAIL],
+ 'js1_5/String/regress-56940-01': [PASS, FAIL],
+ 'js1_5/String/regress-56940-02': [PASS, FAIL],
+ 'js1_5/String/regress-157334-01': [PASS, FAIL],
+ 'js1_5/String/regress-322772': [PASS, FAIL],
+ 'js1_5/Array/regress-99120-01': [PASS, FAIL],
+ 'js1_5/Array/regress-99120-02': [PASS, FAIL],
+ 'js1_5/Regress/regress-347306-01': [PASS, FAIL],
+ 'js1_5/Regress/regress-416628': [PASS, FAIL, ['mode == debug', TIMEOUT]],
-# Test that rely on specific timezone (not working in Denmark).
-js1_5/Regress/regress-58116: PASS || FAIL
+  # The following two tests assume that daylight saving time starts on the
+  # first Sunday in April. This is not true when executing the tests outside
+ # California! In Denmark the adjustment starts one week earlier.
+ # Tests based on shell that use dates in this gap are flaky.
+ 'ecma/Date/15.9.5.10-1': [PASS, FAIL],
+ 'ecma/Date/15.9.5.12-1': [PASS, FAIL],
+ 'ecma/Date/15.9.5.14': [PASS, FAIL],
+ 'ecma/Date/15.9.5.34-1': [PASS, FAIL],
-# Flaky random() test. Tests the distribution of calls to Math.random().
-js1_5/Regress/regress-211590: PASS || FAIL
+ # These tests sometimes pass (in particular on Windows). They build up
+ # a lot of stuff on the stack, which normally causes a stack overflow,
+ # but sometimes it makes it through?
+ 'js1_5/Regress/regress-98901': [PASS, FAIL],
-# Flaky tests; expect BigO-order computations to yield 1, but the code
-# cannot handle outliers. See bug #925864.
-ecma_3/RegExp/regress-311414: PASS || FAIL
-ecma_3/RegExp/regress-289669: PASS || FAIL
-js1_5/String/regress-314890: PASS || FAIL
-js1_5/String/regress-56940-01: PASS || FAIL
-js1_5/String/regress-56940-02: PASS || FAIL
-js1_5/String/regress-157334-01: PASS || FAIL
-js1_5/String/regress-322772: PASS || FAIL
-js1_5/Array/regress-99120-01: PASS || FAIL
-js1_5/Array/regress-99120-02: PASS || FAIL
-js1_5/Regress/regress-347306-01: PASS || FAIL
-js1_5/Regress/regress-416628: PASS || FAIL || TIMEOUT if $mode == debug
+ # Tests that sorting arrays of ints is less than 3 times as fast
+ # as sorting arrays of strings.
+ 'js1_5/extensions/regress-371636': [PASS, FAIL, ['mode == debug', TIMEOUT]],
-# The following two tests assume that daylight savings time starts first Sunday
-# in April. This is not true when executing the tests outside California!
-# In Denmark the adjustment starts one week earlier!.
-# Tests based on shell that use dates in this gap are flaky.
-ecma/Date/15.9.5.10-1: PASS || FAIL
-ecma/Date/15.9.5.12-1: PASS || FAIL
-ecma/Date/15.9.5.14: PASS || FAIL
-ecma/Date/15.9.5.34-1: PASS || FAIL
+ # Tests depend on GC timings. Inherently flaky.
+ 'js1_5/GC/regress-383269-01': [PASS, FAIL],
+ 'js1_5/GC/regress-383269-02': [PASS, FAIL],
+ 'js1_5/Regress/regress-404755': [PASS, FAIL],
-# These tests sometimes pass (in particular on Windows). They build up
-# a lot of stuff on the stack, which normally causes a stack overflow,
-# but sometimes it makes it through?
-js1_5/Regress/regress-98901: PASS || FAIL
+ # Test that depends on timer resolution. Fails every now and then
+ # if we're unlucky enough to get a context switch at a bad time.
+ 'js1_5/extensions/regress-363258': [PASS, FAIL],
-# Tests that sorting arrays of ints is less than 3 times as fast
-# as sorting arrays of strings.
-js1_5/extensions/regress-371636: PASS || FAIL || TIMEOUT if $mode == debug
+ # Test that assumes specific runtime for a regexp, flaky in debug mode.
+ 'ecma_3/RegExp/regress-85721': [PASS, ['mode == debug', FAIL]],
-# Tests depend on GC timings. Inherently flaky.
-js1_5/GC/regress-383269-01: PASS || FAIL
-js1_5/GC/regress-383269-02: PASS || FAIL
-js1_5/Regress/regress-404755: PASS || FAIL
+ # Test that assumes specific execution time, flaky in debug mode.
+ 'js1_5/Array/regress-101964': [PASS, ['mode == debug', FAIL]],
-# Test that depends on timer resolution. Fails every now and then
-# if we're unlucky enough to get a context switch at a bad time.
-js1_5/extensions/regress-363258: PASS || FAIL
+ ##################### INCOMPATIBLE TESTS #####################
-# Test that assumes specific runtime for a regexp, flaky in debug mode.
-ecma_3/RegExp/regress-85721: PASS || FAIL if $mode == debug
+ # This section is for tests that fail in both V8 and JSC. Thus they
+ # have been determined to be incompatible between Mozilla and V8/JSC.
+ # toPrecision argument restricted to range 1..21 in JSC/V8 and ECMA-262
+ 'js1_5/Regress/regress-452346': [FAIL_OK],
-# Test that assumes specific execution time, flaky in debug mode.
-js1_5/Array/regress-101964: PASS || FAIL if $mode == debug
+ # Fails because it calls builtins as functions and does not expect the
+ # builtin to have undefined as the receiver.
+ 'ecma/String/15.5.4.6-2': [FAIL_OK],
+ # Fails because it expects String.prototype.split to distinguish whether
+ # separator was undefined or not passed at all.
+ 'ecma/String/15.5.4.8-2': [FAIL_OK],
-##################### INCOMPATIBLE TESTS #####################
+ # Fail because of toLowerCase and toUpperCase conversion.
+ 'ecma/String/15.5.4.11-2': [FAIL_OK],
+ 'ecma/String/15.5.4.11-5': [FAIL_OK],
+ 'ecma/String/15.5.4.12-1': [FAIL_OK],
+ 'ecma/String/15.5.4.12-4': [FAIL_OK],
-# This section is for tests that fail in both V8 and JSC. Thus they
-# have been determined to be incompatible between Mozilla and V8/JSC.
+ # This test uses an older version of the unicode standard that fails
+ # us because we correctly convert the armenian small ligature ech-yiwn
+ # to the two upper-case characters ECH and YIWN, whereas the older
+ # unicode version converts it to itself.
+ 'ecma/String/15.5.4.12-5': [FAIL_OK],
-# toPrecision argument restricted to range 1..21 in JSC/V8 and ECMA-262
-js1_5/Regress/regress-452346: FAIL_OK
+ # Creates a linked list of arrays until we run out of memory or time out.
+ 'js1_5/Regress/regress-312588': [SKIP],
-# Fail because it calls builtins as functions and do not expect the
-# builtin to have undefined as the receiver.
-ecma/String/15.5.4.6-2: FAIL_OK
-# Fail because it expects String.prototype.split to distinguish whether
-# separator was undefined or not passed at all.
-ecma/String/15.5.4.8-2: FAIL_OK
+ # Runs out of memory because it compiles huge functions.
+ 'js1_5/Function/regress-338001': [FAIL_OK],
+ 'js1_5/Function/regress-338121-01': [FAIL_OK],
+ 'js1_5/Function/regress-338121-02': [FAIL_OK],
+ 'js1_5/Function/regress-338121-03': [FAIL_OK],
-# Fail because of toLowerCase and toUpperCase conversion.
-ecma/String/15.5.4.11-2: FAIL_OK
-ecma/String/15.5.4.11-5: FAIL_OK
-ecma/String/15.5.4.12-1: FAIL_OK
-ecma/String/15.5.4.12-4: FAIL_OK
+ # Expects the 'prototype' property of functions to be enumerable.
+ 'js1_5/Function/10.1.6-01': [FAIL_OK],
-# This test uses an older version of the unicode standard that fails
-# us because we correctly convert the armenian small ligature ech-yiwn
-# to the two upper-case characters ECH and YIWN, whereas the older
-# unicode version converts it to itself.
-ecma/String/15.5.4.12-5: FAIL_OK
+ # === RegExp ===
+ # We don't match the syntax error message of Mozilla for invalid
+ # RegExp flags.
+ 'ecma_3/RegExp/15.10.4.1-6': [FAIL_OK],
-# Creates a linked list of arrays until we run out of memory or timeout.
-js1_5/Regress/regress-312588: SKIP
+ # PCRE doesn't allow subpattern nesting deeper than 200; this test uses
+ # depth 500. JSC detects the case, returns null from the match,
+ # and passes this test (the test doesn't check for a correct return
+ # value).
+ 'ecma_3/RegExp/regress-119909': [PASS, FAIL_OK],
-# Runs out of memory because it compiles huge functions.
-js1_5/Function/regress-338001: FAIL_OK
-js1_5/Function/regress-338121-01: FAIL_OK
-js1_5/Function/regress-338121-02: FAIL_OK
-js1_5/Function/regress-338121-03: FAIL_OK
+ # Difference in the way capturing subpatterns work. In JS, when the
+ # 'minimum repeat count' is reached, the empty string must not match.
+ # In this case, we are similar but not identical to JSC. Hard to
+ # support the JS behavior with PCRE, so maybe emulate JSC?
+ 'ecma_3/RegExp/regress-209919': [PASS, FAIL_OK],
+ 'js1_5/extensions/regress-459606': [PASS, FAIL_OK],
-# Expectes 'prototype' property of functions to be enumerable.
-js1_5/Function/10.1.6-01: FAIL_OK
-#:=== RegExp:===
-# We don't match the syntax error message of Mozilla for invalid
-# RegExp flags.
-ecma_3/RegExp/15.10.4.1-6: FAIL_OK
+ # PCRE's match limit is reached. SpiderMonkey hangs on the first one,
+ # JSC returns true somehow. Maybe they up the match limit? There is
+ # an open V8 bug 676063 about this.
+ 'ecma_3/RegExp/regress-330684': [TIMEOUT],
-# PCRE doesn't allow subpattern nesting deeper than 200, this tests
-# depth 500. JSC detects the case, and return null from the match,
-# and passes this test (the test doesn't check for a correct return
-# value).
-ecma_3/RegExp/regress-119909: PASS || FAIL_OK
+ # This test contains a regexp that runs exponentially long. Spidermonkey
+ # standalone will hang, though apparently inside Firefox it will trigger a
+ # long-running-script timeout. JSCRE passes by hitting the matchLimit and
+ # just pretending that an exhaustive search found no match.
+ 'ecma_3/RegExp/regress-307456': [PASS, TIMEOUT],
-# Difference in the way capturing subpatterns work. In JS, when the
-# 'minimum repeat count' is reached, the empty string must not match.
-# In this case, we are similar but not identical to JSC. Hard to
-# support the JS behavior with PCRE, so maybe emulate JSC?
-ecma_3/RegExp/regress-209919: PASS || FAIL_OK
-js1_5/extensions/regress-459606: PASS || FAIL_OK
+ # We do not detect overflow in bounds for back references and {}
+ # quantifiers. Might fix by parsing numbers differently?
+ 'js1_5/Regress/regress-230216-2': [FAIL_OK],
-# PCRE's match limit is reached. SpiderMonkey hangs on the first one,
-# JSC returns true somehow. Maybe they up the match limit? There is
-# an open V8 bug 676063 about this.
-ecma_3/RegExp/regress-330684: TIMEOUT
+ # Regexp too long for PCRE.
+ 'js1_5/Regress/regress-280769': [PASS, FAIL],
+ 'js1_5/Regress/regress-280769-1': [PASS, FAIL],
+ 'js1_5/Regress/regress-280769-2': [PASS, FAIL],
+ 'js1_5/Regress/regress-280769-4': [PASS, FAIL],
+ 'js1_5/Regress/regress-280769-5': [PASS, FAIL],
-# This test contains a regexp that runs exponentially long. Spidermonkey
-# standalone will hang, though apparently inside Firefox it will trigger a
-# long-running-script timeout. JSCRE passes by hitting the matchLimit and
-# just pretending that an exhaustive search found no match.
-ecma_3/RegExp/regress-307456: PASS || TIMEOUT
+ # We do not support static RegExp.multiline - should we?
+ 'js1_2/regexp/RegExp_multiline': [FAIL_OK],
+ 'js1_2/regexp/RegExp_multiline_as_array': [FAIL_OK],
+ 'js1_2/regexp/beginLine': [FAIL_OK],
+ 'js1_2/regexp/endLine': [FAIL_OK],
-# We do not detect overflow in bounds for back references and {}
-# quantifiers. Might fix by parsing numbers differently?
-js1_5/Regress/regress-230216-2: FAIL_OK
+ # We no longer let calls to test and exec with no argument implicitly
+ # use the previous input.
+ 'js1_2/regexp/RegExp_input': [FAIL_OK],
+ 'js1_2/regexp/RegExp_input_as_array': [FAIL_OK],
-# Regexp too long for PCRE.
-js1_5/Regress/regress-280769: PASS || FAIL
-js1_5/Regress/regress-280769-1: PASS || FAIL
-js1_5/Regress/regress-280769-2: PASS || FAIL
-js1_5/Regress/regress-280769-4: PASS || FAIL
-js1_5/Regress/regress-280769-5: PASS || FAIL
+ # To be compatible with Safari, typeof a regexp yields 'function';
+ # in Firefox it yields 'object'.
+ 'js1_2/function/regexparg-1': [FAIL_OK],
-# We do not support static RegExp.multiline - should we?.
-js1_2/regexp/RegExp_multiline: FAIL_OK
-js1_2/regexp/RegExp_multiline_as_array: FAIL_OK
-js1_2/regexp/beginLine: FAIL_OK
-js1_2/regexp/endLine: FAIL_OK
+ # Date trouble?
+ 'js1_5/Date/regress-301738-02': [FAIL_OK],
-# We no longer let calls to test and exec with no argument implicitly
-# use the previous input.
-js1_2/regexp/RegExp_input: FAIL_OK
-js1_2/regexp/RegExp_input_as_array: FAIL_OK
+ # This test fails for all browsers in the CET timezone.
+ 'ecma/Date/15.9.5.35-1': [PASS, FAIL_OK],
-# To be compatible with safari typeof a regexp yields 'function';
-# in firefox it yields 'object'.
-js1_2/function/regexparg-1: FAIL_OK
+ # SpiderMonkey allows stuff in parentheses directly after the minutes
+ # in a date. JSC does not, so we don't either.
+ 'js1_5/Date/regress-309925-02': [FAIL_OK],
-# Date trouble?
-js1_5/Date/regress-301738-02: FAIL_OK
+ # Print string after deleting array element?
+ 'js1_5/Expressions/regress-96526-delelem': [FAIL_OK],
-# This test fails for all browsers on in the CET timezone.
-ecma/Date/15.9.5.35-1: PASS || FAIL_OK
+ # Stack overflows should be InternalError: too much recursion?
+ 'js1_5/Regress/regress-234389': [FAIL_OK],
-# Spidermonkey allows stuff in parenthesis directly after the minutes
-# in a date. JSC does not, so we don't either.
-js1_5/Date/regress-309925-02: FAIL_OK
+ # This may very well be a bogus test. I'm not sure yet.
+ 'js1_5/Regress/regress-320119': [FAIL_OK],
-# Print string after deleting array element?
-js1_5/Expressions/regress-96526-delelem: FAIL_OK
+ # No support for toSource().
+ 'js1_5/Regress/regress-248444': [FAIL_OK],
+ 'js1_5/Regress/regress-313967-01': [FAIL_OK],
+ 'js1_5/Regress/regress-313967-02': [FAIL_OK],
-# Stack overflows should be InternalError: too much recursion?
-js1_5/Regress/regress-234389: FAIL_OK
+ # This fails because we don't have stack space for Function.prototype.apply
+ # with very large numbers of arguments. The test uses 2^24 arguments.
+ 'js1_5/Array/regress-350256-03': [FAIL_OK],
-# This may very well be a bogus test. I'm not sure yet.
-js1_5/Regress/regress-320119: FAIL_OK
+ # Extra arguments not handled properly in String.prototype.match
+ 'js1_5/Regress/regress-179524': [FAIL_OK],
-# No support for toSource().
-js1_5/Regress/regress-248444: FAIL_OK
-js1_5/Regress/regress-313967-01: FAIL_OK
-js1_5/Regress/regress-313967-02: FAIL_OK
+ # Uncategorized failures. Please help categorize (or fix) these failures.
+ 'js1_5/Regress/regress-172699': [FAIL_OK],
-# This fails because we don't have stack space for Function.prototype.apply
-# with very large numbers of arguments. The test uses 2^24 arguments.
-js1_5/Array/regress-350256-03: FAIL_OK
+ # Assumes that the prototype of a function is enumerable. Non-ECMA,
+ # see section 15.3.3.1, page 86.
+ 'ecma/GlobalObject/15.1.2.2-1': [FAIL_OK],
+ 'ecma/GlobalObject/15.1.2.3-1': [FAIL_OK],
+ 'ecma/GlobalObject/15.1.2.4': [FAIL_OK],
+ 'ecma/GlobalObject/15.1.2.5-1': [FAIL_OK],
+ 'ecma/GlobalObject/15.1.2.6': [FAIL_OK],
+ 'ecma/GlobalObject/15.1.2.7': [FAIL_OK],
-# Extra arguments not handled properly in String.prototype.match
-js1_5/Regress/regress-179524: FAIL_OK
+ # A leading zero no longer signals octal numbers (ECMA-262 Annex E 15.1.2.2).
+ 'ecma/GlobalObject/15.1.2.2-2': [FAIL_OK],
-# Uncategorized failures. Please help categorize (or fix) these failures.
-js1_5/Regress/regress-172699: FAIL_OK
+ # Tests that rely on specific details of function decompilation or
+ # print strings for errors. Non-ECMA behavior.
+ 'js1_2/function/tostring-2': [FAIL_OK],
+ 'js1_2/Objects/toString-001': [FAIL_OK],
+ 'js1_5/LexicalConventions/regress-469940': [FAIL_OK],
+ 'js1_5/Exceptions/regress-332472': [FAIL_OK],
+ 'js1_5/Regress/regress-173067': [FAIL_OK],
+ 'js1_5/Regress/regress-355556': [FAIL_OK],
+ 'js1_5/Regress/regress-328664': [FAIL_OK],
+ 'js1_5/Regress/regress-252892': [FAIL_OK],
+ 'js1_5/Regress/regress-352208': [FAIL_OK],
+ 'ecma_3/Array/15.4.5.1-01': [FAIL_OK],
+ 'ecma_3/Array/regress-387501': [FAIL_OK],
+ 'ecma_3/LexicalConventions/7.9.1': [FAIL_OK],
+ 'ecma_3/RegExp/regress-375711': [FAIL_OK],
+ 'ecma_3/Unicode/regress-352044-01': [FAIL_OK],
+ 'ecma_3/extensions/regress-274152': [FAIL_OK],
+ 'js1_5/Regress/regress-372364': [FAIL_OK],
+ 'js1_5/Regress/regress-420919': [FAIL_OK],
+ 'js1_5/Regress/regress-422348': [FAIL_OK],
+ 'js1_5/Regress/regress-410852': [FAIL_OK],
+ 'ecma_3/RegExp/regress-375715-04': [FAIL_OK],
+ 'js1_5/decompilation/regress-456964-01': [FAIL_OK],
+ 'js1_5/decompilation/regress-437288-02': [FAIL_OK],
+ 'js1_5/decompilation/regress-457824': [FAIL_OK],
+ 'js1_5/decompilation/regress-460116-01': [FAIL_OK],
+ 'js1_5/decompilation/regress-460116-02': [FAIL_OK],
+ 'js1_5/decompilation/regress-460501': [FAIL_OK],
+ 'js1_5/decompilation/regress-460116-03': [FAIL_OK],
+ 'js1_5/decompilation/regress-461110': [FAIL_OK],
-# Assumes that the prototype of a function is enumerable. Non-ECMA,
-# see section 15.3.3.1, page 86.
-ecma/GlobalObject/15.1.2.2-1: FAIL_OK
-ecma/GlobalObject/15.1.2.3-1: FAIL_OK
-ecma/GlobalObject/15.1.2.4: FAIL_OK
-ecma/GlobalObject/15.1.2.5-1: FAIL_OK
-ecma/GlobalObject/15.1.2.6: FAIL_OK
-ecma/GlobalObject/15.1.2.7: FAIL_OK
+ # Tests that use uneval. Non-ECMA.
+ 'js1_5/GC/regress-418128': [FAIL_OK],
+ 'js1_5/extensions/regress-465276': [FAIL_OK],
+ 'js1_5/Error/regress-465377': [FAIL_OK],
-# Leading zero no longer signal octal numbers (ECMA-262 Annex E 15.1.2.2).
-ecma/GlobalObject/15.1.2.2-2: FAIL_OK
+ # Tests that use the watch method. Non-ECMA.
+ 'js1_5/extensions/regress-435345-01': [FAIL_OK],
+ 'js1_5/extensions/regress-455413': [FAIL_OK],
-# Tests that rely on specific details of function decompilation or
-# print strings for errors. Non-ECMA behavior.
-js1_2/function/tostring-2: FAIL_OK
-js1_2/Objects/toString-001: FAIL_OK
-js1_5/LexicalConventions/regress-469940: FAIL_OK
-js1_5/Exceptions/regress-332472: FAIL_OK
-js1_5/Regress/regress-173067: FAIL_OK
-js1_5/Regress/regress-355556: FAIL_OK
-js1_5/Regress/regress-328664: FAIL_OK
-js1_5/Regress/regress-252892: FAIL_OK
-js1_5/Regress/regress-352208: FAIL_OK
-ecma_3/Array/15.4.5.1-01: FAIL_OK
-ecma_3/Array/regress-387501: FAIL_OK
-ecma_3/LexicalConventions/7.9.1: FAIL_OK
-ecma_3/RegExp/regress-375711: FAIL_OK
-ecma_3/Unicode/regress-352044-01: FAIL_OK
-ecma_3/extensions/regress-274152: FAIL_OK
-js1_5/Regress/regress-372364: FAIL_OK
-js1_5/Regress/regress-420919: FAIL_OK
-js1_5/Regress/regress-422348: FAIL_OK
-js1_5/Regress/regress-410852: FAIL_OK
-ecma_3/RegExp/regress-375715-04: FAIL_OK
-js1_5/decompilation/regress-456964-01: FAIL_OK
-js1_5/decompilation/regress-437288-02: FAIL_OK
-js1_5/decompilation/regress-457824: FAIL_OK
-js1_5/decompilation/regress-460116-01: FAIL_OK
-js1_5/decompilation/regress-460116-02: FAIL_OK
-js1_5/decompilation/regress-460501: FAIL_OK
-js1_5/decompilation/regress-460116-03: FAIL_OK
-js1_5/decompilation/regress-461110: FAIL_OK
+ # Uses Mozilla-specific QName, XML, XMLList and Iterator.
+ 'js1_5/Regress/regress-407323': [FAIL_OK],
+ 'js1_5/Regress/regress-407957': [FAIL_OK],
-# Tests that use uneval. Non-ECMA.
-js1_5/GC/regress-418128: FAIL_OK
-js1_5/extensions/regress-465276: FAIL_OK
-js1_5/Error/regress-465377: FAIL_OK
+ # Relies on JavaScript 1.2 / 1.3 deprecated features.
+ 'js1_2/function/String': [FAIL_OK],
+ 'js1_2/operator/equality': [FAIL_OK],
+ 'js1_2/version120/boolean-001': [FAIL_OK],
+ 'js1_2/String/concat': [FAIL_OK],
+ 'js1_2/function/Function_object': [FAIL_OK],
+ 'js1_2/function/tostring-1': [FAIL_OK],
+ 'js1_2/version120/regress-99663': [FAIL_OK],
+ 'js1_2/regexp/RegExp_lastIndex': [FAIL_OK],
+ 'js1_2/regexp/string_split': [FAIL_OK],
-# Tests that use the watch method. Non-ECMA.
-js1_5/extensions/regress-435345-01: FAIL_OK
-js1_5/extensions/regress-455413: FAIL_OK
+ # RegExps are not callable.
+ 'js1_2/regexp/simple_form': [FAIL_OK],
+ 'js1_2/regexp/regress-6359': [FAIL_OK],
+ 'js1_2/regexp/regress-9141': [FAIL_OK],
+ 'js1_5/Regress/regress-224956': [FAIL_OK],
+ 'js1_5/Regress/regress-325925': [FAIL_OK],
+ 'ecma_2/RegExp/regress-001': [FAIL_OK],
-# Uses Mozilla-specific QName, XML, XMLList and Iterator.
-js1_5/Regress/regress-407323: FAIL_OK
-js1_5/Regress/regress-407957: FAIL_OK
+ # We do not check for bad surrogate pairs when quoting strings.
+ 'js1_5/Regress/regress-315974': [FAIL_OK],
-# Relies on JavaScript 1.2 / 1.3 deprecated features.
-js1_2/function/String: FAIL_OK
-js1_2/operator/equality: FAIL_OK
-js1_2/version120/boolean-001: FAIL_OK
-js1_2/String/concat: FAIL_OK
-js1_2/function/Function_object: FAIL_OK
-js1_2/function/tostring-1: FAIL_OK
-js1_2/version120/regress-99663: FAIL_OK
-js1_2/regexp/RegExp_lastIndex: FAIL_OK
-js1_2/regexp/string_split: FAIL_OK
+ # Use unsupported "watch".
+ 'js1_5/Regress/regress-213482': [FAIL_OK],
+ 'js1_5/Regress/regress-240577': [FAIL_OK],
+ 'js1_5/Regress/regress-355344': [FAIL_OK],
+ 'js1_5/Object/regress-362872-01': [FAIL_OK],
+ 'js1_5/Object/regress-362872-02': [FAIL_OK],
+ 'js1_5/Regress/regress-361467': [FAIL_OK],
+ 'js1_5/Regress/regress-385393-06': [FAIL_OK],
+ 'js1_5/Regress/regress-506567': [FAIL_OK],
-# RegExps are not callable.
-js1_2/regexp/simple_form: FAIL_OK
-js1_2/regexp/regress-6359: FAIL_OK
-js1_2/regexp/regress-9141: FAIL_OK
-js1_5/Regress/regress-224956: FAIL_OK
-js1_5/Regress/regress-325925: FAIL_OK
-ecma_2/RegExp/regress-001: FAIL_OK
+ # Use special Mozilla getter/setter syntax.
+ 'js1_5/Regress/regress-354924': [FAIL_OK],
+ 'js1_5/Regress/regress-355341': [FAIL_OK],
+ 'js1_5/GC/regress-316885-01': [FAIL_OK],
+ 'js1_5/GetSet/getset-002': [FAIL_OK],
+ 'js1_5/GetSet/regress-353264': [FAIL_OK],
+ 'js1_5/Regress/regress-361617': [FAIL_OK],
+ 'js1_5/Regress/regress-362583': [FAIL_OK],
+ 'js1_5/extensions/regress-356378': [FAIL_OK],
+ 'js1_5/extensions/regress-452178': [FAIL_OK],
-# We do not check for bad surrogate pairs when quoting strings.
-js1_5/Regress/regress-315974: FAIL_OK
+ # Requires Mozilla-specific strict mode or options() function.
+ 'ecma_3/Object/8.6.1-01': [FAIL_OK],
+ 'js1_5/Exceptions/regress-315147': [FAIL_OK],
+ 'js1_5/Regress/regress-106244': [FAIL_OK],
+ 'js1_5/Regress/regress-317533': [FAIL_OK],
+ 'js1_5/Regress/regress-323314-1': [FAIL_OK],
+ 'js1_5/Regress/regress-352197': [FAIL_OK],
-# Use unsupported "watch".
-js1_5/Regress/regress-213482: FAIL_OK
-js1_5/Regress/regress-240577: FAIL_OK
-js1_5/Regress/regress-355344: FAIL_OK
-js1_5/Object/regress-362872-01: FAIL_OK
-js1_5/Object/regress-362872-02: FAIL_OK
-js1_5/Regress/regress-361467: FAIL_OK
-js1_5/Regress/regress-385393-06: FAIL_OK
-js1_5/Regress/regress-506567: FAIL_OK
+ # Equivalent to assert(false).
+ 'ecma_2/RegExp/exec-001': [FAIL_OK],
+ 'ecma_2/String/replace-001': [FAIL_OK],
-# Use special Mozilla getter/setter syntax
-js1_5/Regress/regress-354924: FAIL_OK
-js1_5/Regress/regress-355341: FAIL_OK
-js1_5/GC/regress-316885-01: FAIL_OK
-js1_5/GetSet/getset-002: FAIL_OK
-js1_5/GetSet/regress-353264: FAIL_OK
-js1_5/Regress/regress-361617: FAIL_OK
-js1_5/Regress/regress-362583: FAIL_OK
-js1_5/extensions/regress-356378: FAIL_OK
-js1_5/extensions/regress-452178: FAIL_OK
+ # We do not strip unicode format control characters. This is really
+ # required for working with non-latin character sets. We match JSC
+ # and IE here. Firefox matches the spec (section 7.1).
+ 'ecma_3/Unicode/uc-001': [FAIL_OK],
-# Requires Mozilla-specific strict mode or options() function.
-ecma_3/Object/8.6.1-01: FAIL_OK
-js1_5/Exceptions/regress-315147: FAIL_OK
-js1_5/Regress/regress-106244: FAIL_OK
-js1_5/Regress/regress-317533: FAIL_OK
-js1_5/Regress/regress-323314-1: FAIL_OK
-js1_5/Regress/regress-352197: FAIL_OK
+ # A non-breaking space doesn't match \s in a regular expression. This
+ # behaviour matches JSC. All the VMs have different behaviours in which
+ # characters match \s so we do the same as JSC until they change.
+ 'ecma_3/Unicode/uc-002': [PASS, FAIL_OK],
-# Equivalent to assert(false).
-ecma_2/RegExp/exec-001: FAIL_OK
-ecma_2/String/replace-001: FAIL_OK
+ # String.prototype.split on empty strings always returns an array
+ # with one element (as specified in ECMA-262).
+ 'js1_2/Array/array_split_1': [FAIL_OK],
-# We do not strip unicode format control characters. This is really
-# required for working with non-latin character sets. We match JSC
-# and IE here. Firefox matches the spec (section 7.1).
-ecma_3/Unicode/uc-001: FAIL_OK
+ # The concat() method is defined on Array.prototype, not Array.
+ 'js1_5/Array/regress-313153': [FAIL_OK],
-# A non-breaking space doesn't match \s in a regular expression. This behaviour
-# matches JSC. All the VMs have different behaviours in which characters match
-# \s so we do the same as JSC until they change.
-ecma_3/Unicode/uc-002: PASS || FAIL_OK
+ # The join() method is defined on Array.prototype, not Array.
+ 'js1_5/Array/regress-474529': [FAIL_OK],
+ # The lastIndexOf() method is defined on Array.prototype, not Array.
+ 'ecma_3/Array/15.5.4.8-01': [FAIL_OK],
-# String.prototype.split on empty strings always returns an array
-# with one element (as specified in ECMA-262).
-js1_2/Array/array_split_1: FAIL_OK
+ # The fileName and lineNumber properties of Error instances are
+ # not supported. Mozilla-specific extension.
+ 'js1_5/Exceptions/errstack-001': [FAIL_OK],
+ 'js1_5/Exceptions/regress-257751': [FAIL_OK],
+ 'js1_5/Regress/regress-119719': [FAIL_OK],
+ 'js1_5/Regress/regress-167328': [FAIL_OK],
+ 'js1_5/Regress/regress-243869': [FAIL_OK],
-# The concat() method is defined in Array.prototype; not Array.
-js1_5/Array/regress-313153: FAIL_OK
+ # Unsupported import/export and <xml> literals. Mozilla extensions.
+ 'js1_5/Regress/regress-249211': [FAIL_OK],
+ 'js1_5/Regress/regress-309242': [FAIL_OK],
+ 'js1_5/Regress/regress-350692': [FAIL_OK],
+ 'js1_5/extensions/regress-421621': [FAIL_OK],
+ 'js1_5/extensions/regress-432075': [FAIL_OK],
-# The join() method is defined on Array.prototype; not Array.
-js1_5/Array/regress-474529: FAIL_OK
-# The lastIndexOf() method is defined on Array.prototype, not Array.
-ecma_3/Array/15.5.4.8-01: FAIL_OK
+ # The length of Error functions is 1, not 3.
+ 'js1_5/Exceptions/regress-123002': [FAIL_OK],
-# Properties fileName, and lineNumber of Error instances are
-# not supported. Mozilla specific extension.
-js1_5/Exceptions/errstack-001: FAIL_OK
-js1_5/Exceptions/regress-257751: FAIL_OK
-js1_5/Regress/regress-119719: FAIL_OK
-js1_5/Regress/regress-167328: FAIL_OK
-js1_5/Regress/regress-243869: FAIL_OK
+ # Reserved keywords as function names, etc. are not supported.
+ 'js1_5/LexicalConventions/regress-343675': [FAIL_OK],
-# Unsupported import/export and <xml> literals. Mozilla extensions.
-js1_5/Regress/regress-249211: FAIL_OK
-js1_5/Regress/regress-309242: FAIL_OK
-js1_5/Regress/regress-350692: FAIL_OK
-js1_5/extensions/regress-421621: FAIL_OK
-js1_5/extensions/regress-432075: FAIL_OK
+ # Tests if future reserved keywords of ECMA-262, edition 3 emit warnings. We
+ # implement the edition 5 behaviour and fail on use of edition 5 future
+ # reserved keywords as identifiers.
+ 'js1_5/Regress/regress-240317': [FAIL_OK],
-# The length of Error functions is 1 not 3.
-js1_5/Exceptions/regress-123002: FAIL_OK
+ # Unsupported list comprehensions: [ ... for ... ] and for each.
+ 'js1_5/Regress/regress-352009': [FAIL_OK],
+ 'js1_5/Regress/regress-349648': [FAIL_OK],
-# Reserved keywords as function names, etc is not supported.
-js1_5/LexicalConventions/regress-343675: FAIL_OK
+ # Expects top-level arguments (passed on the command line?) to be
+ # the empty string?
+ 'js1_5/Regress/regress-336100': [FAIL_OK],
-# Tests if future reserved keywords of ECMA-262, edition 3 emit warnings. We
-# implement the edition 5 behaviour and fail on use of edition 5 future reserved
-# keywords as identifiers.
-js1_5/Regress/regress-240317: FAIL_OK
+ # Regular expression test failures due to PCRE. We match JSC (i.e., Perl)
+ # behavior and not the ECMA spec.
+ 'ecma_3/RegExp/perlstress-001': [PASS, FAIL_OK],
+ 'ecma_3/RegExp/regress-334158': [PASS, FAIL],
-# Unsupported list comprehensions: [ ... for ... ] and for each.
-js1_5/Regress/regress-352009: FAIL_OK
-js1_5/Regress/regress-349648: FAIL_OK
+ # This test fails due to http://code.google.com/p/v8/issues/detail?id=187
+ # Failure to clear captures when a lookahead is unwound.
+ 'ecma_3/RegExp/15.10.2-1': [PASS, FAIL_OK],
+ # This test requires a failure if we try to compile a function with more
+ # than 65536 arguments. This seems to be a Mozilla restriction.
+ 'js1_5/Regress/regress-290575': [PASS, FAIL_OK],
-# Expects top level arguments (passed on command line?) to be
-# the empty string?
-js1_5/Regress/regress-336100: FAIL_OK
+ # Fails because of the way function declarations are
+ # handled in V8/JSC. V8 follows IE behavior and introduces
+ # all nested function declarations when entering the
+ # surrounding function, whereas SpiderMonkey declares
+ # them dynamically when the statement is executed.
+ 'ecma_3/Function/scope-001': [FAIL_OK],
+ 'ecma_3/FunExpr/fe-001': [FAIL_OK],
+ 'js1_5/Scope/regress-184107': [FAIL_OK],
-# Regular expression test failures due to PCRE. We match JSC (ie, perl)
-# behavior and not the ECMA spec.
-ecma_3/RegExp/perlstress-001: PASS || FAIL_OK
-ecma_3/RegExp/regress-334158: PASS || FAIL
-# This test fails due to http://code.google.com/p/v8/issues/detail?id=187
-# Failure to clear captures when a lookahead is unwound.
-ecma_3/RegExp/15.10.2-1: PASS || FAIL_OK
+ # Function is deletable in V8 and JSC.
+ 'js1_5/Regress/regress-352604': [FAIL_OK],
-# This test requires a failure if we try to compile a function with more
-# than 65536 arguments. This seems to be a Mozilla restriction.
-js1_5/Regress/regress-290575: PASS || FAIL_OK
+ # Cannot call strings as functions. Expects not to crash.
+ 'js1_5/Regress/regress-417893': [FAIL_OK],
-# Fails because of the way function declarations are
-# handled in V8/JSC. V8 follows IE behavior and introduce
-# all nested function declarations when entering the
-# surrounding function, whereas Spidermonkey declares
-# them dynamically when the statement is executed.
-ecma_3/Function/scope-001: FAIL_OK
-ecma_3/FunExpr/fe-001: FAIL_OK
-js1_5/Scope/regress-184107: FAIL_OK
+ # Unsupported use of "[]" as function parameter. We match JSC.
+ 'js1_5/Regress/regress-416737-01': [FAIL_OK],
+ 'js1_5/Regress/regress-416737-02': [FAIL_OK],
-# Function is deletable in V8 and JSC.
-js1_5/Regress/regress-352604: FAIL_OK
+ # Illegal escape sequences in string literals. These have already been
+ # fixed by most engines (i.e. V8, JSC, Opera and FF).
+ 'ecma/Array/15.4.5.1-1': [FAIL_OK],
+ 'ecma/LexicalConventions/7.7.4': [FAIL_OK],
+ 'ecma_2/RegExp/hex-001': [FAIL_OK],
+ 'js1_2/regexp/hexadecimal': [FAIL_OK],
-# Cannot call strings as functions. Expects not to crash.
-js1_5/Regress/regress-417893: FAIL_OK
-
-# Unsupported use of "[]" as function parameter. We match JSC.
-js1_5/Regress/regress-416737-01: FAIL_OK
-js1_5/Regress/regress-416737-02: FAIL_OK
-
-
-# Illegal escape-sequences in string literals. Has already been fixed
-# by most engines (i.e. V8, JSC, Opera and FF).
-ecma/Array/15.4.5.1-1: FAIL_OK
-ecma/LexicalConventions/7.7.4: FAIL_OK
-ecma_2/RegExp/hex-001: FAIL_OK
-js1_2/regexp/hexadecimal: FAIL_OK
-
-
-# The source field of RegExp objects is properly escaped. We match JSC.
-ecma_2/RegExp/constructor-001: FAIL_OK
-ecma_2/RegExp/function-001: FAIL_OK
-ecma_2/RegExp/properties-001: FAIL_OK
-
-
-# Negative hexadecimal literals are parsed as NaN. This test is outdated.
-ecma/TypeConversion/9.3.1-3: FAIL_OK
-
-
-##################### FAILING TESTS #####################
-
-# This section is for tests that fail in V8 and pass in JSC.
-# Tests that fail in both V8 and JSC belong in the FAIL_OK
-# category.
-
-# This fails because we don't handle Function.prototype.apply with very large
-# numbers of arguments (depending on max stack size). 350256-02 needs more than
-# 4Mbytes of stack space.
-js1_5/Array/regress-350256-02: FAIL
-
-
-# This test seems designed to fail (it produces a 700Mbyte string).
-# We fail on out of memory. The important thing is not to crash.
-js1_5/Regress/regress-303213: FAIL || TIMEOUT if $mode == debug
-
-# This test fails since we now throw in String.prototype.match when apply
-# is given null or undefined as this argument (and so does firefox nightly).
-js1_5/Regress/regress-295052: FAIL
-
-# Bug 1202592: New ecma_3/String/15.5.4.11 is failing.
-ecma_3/String/15.5.4.11: FAIL
-
-# Bug 1202597: New js1_5/Expressions/regress-394673 is failing.
-# Marked as: Will not fix. V8 throws an acceptable RangeError.
-js1_5/Expressions/regress-394673: FAIL
-
-
-# Bug 762: http://code.google.com/p/v8/issues/detail?id=762
-# We do not correctly handle assignments within "with"
-ecma_3/Statements/12.10-01: FAIL
-
-# We do not throw an exception when a const is redeclared.
-# (We only fail section 1 of the test.)
-js1_5/Regress/regress-103602: FAIL
-
-##################### MOZILLA EXTENSION TESTS #####################
-
-ecma/extensions/15.1.2.1-1: FAIL_OK
-ecma_3/extensions/regress-385393-03: FAIL_OK
-ecma_3/extensions/7.9.1: FAIL_OK
-js1_5/extensions/catchguard-001: FAIL_OK
-js1_5/extensions/catchguard-002: FAIL_OK
-js1_5/extensions/catchguard-003: FAIL_OK
-js1_5/extensions/getset-001: FAIL_OK
-js1_5/extensions/getset-003: FAIL_OK
-js1_5/extensions/no-such-method: FAIL_OK
-js1_5/extensions/regress-104077: FAIL_OK
-js1_5/extensions/regress-226078: FAIL_OK
-js1_5/extensions/regress-303277: FAIL_OK
-js1_5/extensions/regress-304897: FAIL_OK
-js1_5/extensions/regress-306738: FAIL_OK
-js1_5/extensions/regress-311161: FAIL_OK
-js1_5/extensions/regress-311583: FAIL_OK
-js1_5/extensions/regress-311792-01: FAIL_OK
-js1_5/extensions/regress-312278: FAIL_OK
-js1_5/extensions/regress-313630: FAIL_OK
-js1_5/extensions/regress-313763: FAIL_OK
-js1_5/extensions/regress-313803: FAIL_OK
-js1_5/extensions/regress-314874: FAIL_OK
-js1_5/extensions/regress-322957: FAIL_OK
-js1_5/extensions/regress-328556: FAIL_OK
-js1_5/extensions/regress-333541: FAIL_OK
-js1_5/extensions/regress-335700: FAIL_OK
-js1_5/extensions/regress-336409-1: FAIL_OK
-js1_5/extensions/regress-336409-2: FAIL_OK
-js1_5/extensions/regress-336410-2: FAIL_OK
-js1_5/extensions/regress-341956-01: FAIL_OK
-js1_5/extensions/regress-345967: FAIL_OK
-js1_5/extensions/regress-346494-01: FAIL_OK
-js1_5/extensions/regress-346494: FAIL_OK
-js1_5/extensions/regress-347306-02: FAIL_OK
-js1_5/extensions/regress-348986: FAIL_OK
-js1_5/extensions/regress-349616: FAIL_OK
-js1_5/extensions/regress-350312-02: FAIL_OK
-js1_5/extensions/regress-350312-03: FAIL_OK
-js1_5/extensions/regress-350531: FAIL_OK
-js1_5/extensions/regress-351102-01: FAIL_OK
-js1_5/extensions/regress-351102-02: FAIL_OK
-js1_5/extensions/regress-351102-06: FAIL_OK
-js1_5/extensions/regress-351973: FAIL_OK
-js1_5/extensions/regress-352060: FAIL_OK
-js1_5/extensions/regress-352094: FAIL_OK
-js1_5/extensions/regress-352261: FAIL_OK
-js1_5/extensions/regress-352281: FAIL_OK
-js1_5/extensions/regress-352455: FAIL_OK
-js1_5/extensions/regress-352604: FAIL_OK
-js1_5/extensions/regress-353214: FAIL_OK
-js1_5/extensions/regress-355339: FAIL_OK
-js1_5/extensions/regress-355497: FAIL_OK
-js1_5/extensions/regress-355622: FAIL_OK
-js1_5/extensions/regress-355736: FAIL_OK
-js1_5/extensions/regress-356085: FAIL_OK
-js1_5/extensions/regress-356106: FAIL_OK
-js1_5/extensions/regress-358594-01: FAIL_OK
-js1_5/extensions/regress-358594-02: FAIL_OK
-js1_5/extensions/regress-358594-03: FAIL_OK
-js1_5/extensions/regress-358594-04: FAIL_OK
-js1_5/extensions/regress-358594-05: FAIL_OK
-js1_5/extensions/regress-358594-06: FAIL_OK
-js1_5/extensions/regress-361346: FAIL_OK
-js1_5/extensions/regress-361360: FAIL_OK
-js1_5/extensions/regress-361558: FAIL_OK
-js1_5/extensions/regress-361571: FAIL_OK
-js1_5/extensions/regress-361856: FAIL_OK
-js1_5/extensions/regress-361964: FAIL_OK
-js1_5/extensions/regress-363988: FAIL_OK
-js1_5/extensions/regress-365869: FAIL_OK
-js1_5/extensions/regress-367630: FAIL_OK
-js1_5/extensions/regress-367923: FAIL_OK
-js1_5/extensions/regress-368859: FAIL_OK
-js1_5/extensions/regress-369696-01: FAIL_OK
-js1_5/extensions/regress-369696-02: FAIL_OK
-js1_5/extensions/regress-369696-03: FAIL_OK
-js1_5/extensions/regress-374589: FAIL_OK
-js1_5/extensions/regress-375801: FAIL_OK
-js1_5/extensions/regress-376052: FAIL_OK
-js1_5/extensions/regress-379523: FAIL_OK
-js1_5/extensions/regress-380581: FAIL_OK
-js1_5/extensions/regress-380831: FAIL_OK
-js1_5/extensions/regress-381205: FAIL_OK
-js1_5/extensions/regress-381211: FAIL_OK
-js1_5/extensions/regress-381304: FAIL_OK
-js1_5/extensions/regress-382509: FAIL_OK
-js1_5/extensions/regress-383965: FAIL_OK
-js1_5/extensions/regress-384680: FAIL_OK
-js1_5/extensions/regress-385393-09: FAIL_OK
-js1_5/extensions/regress-407501: FAIL_OK
-js1_5/extensions/regress-418730: FAIL_OK
-js1_5/extensions/regress-420612: FAIL_OK
-js1_5/extensions/regress-420869-01: FAIL_OK
-js1_5/extensions/regress-424257: FAIL_OK
-js1_5/extensions/regress-424683-01: FAIL_OK
-js1_5/extensions/regress-429739: FAIL_OK
-js1_5/extensions/regress-454142: FAIL_OK
-js1_5/extensions/regress-465145: FAIL_OK
-js1_5/extensions/regress-469625: FAIL_OK
-js1_5/extensions/regress-472787: FAIL_OK
-js1_5/extensions/regress-44009: FAIL_OK
-js1_5/extensions/regress-50447-1: FAIL_OK
-js1_5/extensions/regress-50447: FAIL_OK
-js1_5/extensions/regress-90596-001: FAIL_OK
-js1_5/extensions/regress-90596-002: FAIL_OK
-js1_5/extensions/regress-96284-001: FAIL_OK
-js1_5/extensions/regress-96284-002: FAIL_OK
-js1_5/extensions/toLocaleFormat-01: FAIL_OK
-js1_5/extensions/toLocaleFormat-02: FAIL_OK
-
-js1_5/extensions/regress-330569: TIMEOUT
-js1_5/extensions/regress-351448: TIMEOUT
-# In the 64-bit version, this test takes longer to run out of memory
-# than it does in the 32-bit version when attempting to generate a huge
-# error message in debug mode.
-js1_5/extensions/regress-336410-1: FAIL_OK || TIMEOUT if ($mode == debug && $arch == x64)
-
-##################### DECOMPILATION TESTS #####################
-
-# We don't really about the outcome of running the
-# decompilation tests as long as they don't crash or
-# timeout.
-
-js1_5/decompilation/regress-344120: PASS || FAIL
-js1_5/decompilation/regress-346892: PASS || FAIL
-js1_5/decompilation/regress-346902: PASS || FAIL
-js1_5/decompilation/regress-346904: PASS || FAIL
-js1_5/decompilation/regress-346915: PASS || FAIL
-js1_5/decompilation/regress-349484: PASS || FAIL
-js1_5/decompilation/regress-349489: PASS || FAIL
-js1_5/decompilation/regress-349491: PASS || FAIL
-js1_5/decompilation/regress-349596: PASS || FAIL
-js1_5/decompilation/regress-349650: PASS || FAIL
-js1_5/decompilation/regress-349663: PASS || FAIL
-js1_5/decompilation/regress-350242: PASS || FAIL
-js1_5/decompilation/regress-350263: PASS || FAIL
-js1_5/decompilation/regress-350271: PASS || FAIL
-js1_5/decompilation/regress-350666: PASS || FAIL
-js1_5/decompilation/regress-350670: PASS || FAIL
-js1_5/decompilation/regress-351104: PASS || FAIL
-js1_5/decompilation/regress-351219: PASS || FAIL
-js1_5/decompilation/regress-351336: PASS || FAIL
-js1_5/decompilation/regress-351597: PASS || FAIL
-js1_5/decompilation/regress-351625: PASS || FAIL
-js1_5/decompilation/regress-351626: PASS || FAIL
-js1_5/decompilation/regress-351693: PASS || FAIL
-js1_5/decompilation/regress-351705: PASS || FAIL
-js1_5/decompilation/regress-351793: PASS || FAIL
-js1_5/decompilation/regress-352013: PASS || FAIL
-js1_5/decompilation/regress-352022: PASS || FAIL
-js1_5/decompilation/regress-352073: PASS || FAIL
-js1_5/decompilation/regress-352202: PASS || FAIL
-js1_5/decompilation/regress-352312: PASS || FAIL
-js1_5/decompilation/regress-352360: PASS || FAIL
-js1_5/decompilation/regress-352375: PASS || FAIL
-js1_5/decompilation/regress-352453: PASS || FAIL
-js1_5/decompilation/regress-352649: PASS || FAIL
-js1_5/decompilation/regress-352873-01: PASS || FAIL
-js1_5/decompilation/regress-352873-02: PASS || FAIL
-js1_5/decompilation/regress-353000: PASS || FAIL
-js1_5/decompilation/regress-353120: PASS || FAIL
-js1_5/decompilation/regress-353146: PASS || FAIL
-js1_5/decompilation/regress-354878: PASS || FAIL
-js1_5/decompilation/regress-354910: PASS || FAIL
-js1_5/decompilation/regress-355992: PASS || FAIL
-js1_5/decompilation/regress-356083: PASS || FAIL
-js1_5/decompilation/regress-356248: PASS || FAIL
-js1_5/decompilation/regress-371692: PASS || FAIL
-js1_5/decompilation/regress-373678: PASS || FAIL
-js1_5/decompilation/regress-375639: PASS || FAIL
-js1_5/decompilation/regress-375882: PASS || FAIL
-js1_5/decompilation/regress-376564: PASS || FAIL
-js1_5/decompilation/regress-383721: PASS || FAIL
-js1_5/decompilation/regress-406555: PASS || FAIL
-js1_5/decompilation/regress-460870: PASS || FAIL
-
-
-[ $arch == arm ]
-
-# BUG(3251229): Times out when running new crankshaft test script.
-ecma_3/RegExp/regress-311414: SKIP
-ecma/Date/15.9.5.8: SKIP
-ecma/Date/15.9.5.10-2: SKIP
-ecma/Date/15.9.5.11-2: SKIP
-ecma/Date/15.9.5.12-2: SKIP
-js1_5/Array/regress-99120-02: SKIP
-js1_5/extensions/regress-371636: SKIP
-js1_5/Regress/regress-203278-1: SKIP
-js1_5/Regress/regress-404755: SKIP
-js1_5/Regress/regress-451322: SKIP
-
-
-# BUG(1040): Allow this test to timeout.
-js1_5/GC/regress-203278-2: PASS || TIMEOUT
-
-
-[ $arch == mipsel ]
-
-# BUG(3251229): Times out when running new crankshaft test script.
-ecma_3/RegExp/regress-311414: SKIP
-ecma/Date/15.9.5.8: SKIP
-ecma/Date/15.9.5.10-2: SKIP
-ecma/Date/15.9.5.11-2: SKIP
-ecma/Date/15.9.5.12-2: SKIP
-js1_5/Array/regress-99120-02: SKIP
-js1_5/extensions/regress-371636: SKIP
-js1_5/Regress/regress-203278-1: SKIP
-js1_5/Regress/regress-404755: SKIP
-js1_5/Regress/regress-451322: SKIP
-
-
-# BUG(1040): Allow this test to timeout.
-js1_5/GC/regress-203278-2: PASS || TIMEOUT
+ # The source field of RegExp objects is properly escaped. We match JSC.
+ 'ecma_2/RegExp/constructor-001': [FAIL_OK],
+ 'ecma_2/RegExp/function-001': [FAIL_OK],
+ 'ecma_2/RegExp/properties-001': [FAIL_OK],
+
+
+ # Negative hexadecimal literals are parsed as NaN. This test is outdated.
+ 'ecma/TypeConversion/9.3.1-3': [FAIL_OK],
+
+
+ ##################### FAILING TESTS #####################
+
+ # This section is for tests that fail in V8 and pass in JSC.
+ # Tests that fail in both V8 and JSC belong in the FAIL_OK
+ # category.
+
+ # This fails because we don't handle Function.prototype.apply with very large
+ # numbers of arguments (depending on max stack size). 350256-02 needs more
+ # than 4 MB of stack space.
+ 'js1_5/Array/regress-350256-02': [FAIL],
+
+
+ # This test seems designed to fail (it produces a 700 MB string).
+ # We fail on out of memory. The important thing is not to crash.
+ 'js1_5/Regress/regress-303213': [FAIL, ['mode == debug', TIMEOUT]],
+
+ # This test fails since we now throw in String.prototype.match when apply
+ # is given null or undefined as this argument (and so does firefox nightly).
+ 'js1_5/Regress/regress-295052': [FAIL],
+
+ # Bug 1202592: New ecma_3/String/15.5.4.11 is failing.
+ 'ecma_3/String/15.5.4.11': [FAIL],
+
+ # Bug 1202597: New js1_5/Expressions/regress-394673 is failing.
+ # Marked as: Will not fix. V8 throws an acceptable RangeError.
+ 'js1_5/Expressions/regress-394673': [FAIL],
+
+
+ # Bug 762: http://code.google.com/p/v8/issues/detail?id=762
+ # We do not correctly handle assignments within "with"
+ 'ecma_3/Statements/12.10-01': [FAIL],
+
+ # We do not throw an exception when a const is redeclared.
+ # (We only fail section 1 of the test.)
+ 'js1_5/Regress/regress-103602': [FAIL],
+
+ ##################### MOZILLA EXTENSION TESTS #####################
+
+ 'ecma/extensions/15.1.2.1-1': [FAIL_OK],
+ 'ecma_3/extensions/regress-385393-03': [FAIL_OK],
+ 'ecma_3/extensions/7.9.1': [FAIL_OK],
+ 'js1_5/extensions/catchguard-001': [FAIL_OK],
+ 'js1_5/extensions/catchguard-002': [FAIL_OK],
+ 'js1_5/extensions/catchguard-003': [FAIL_OK],
+ 'js1_5/extensions/getset-001': [FAIL_OK],
+ 'js1_5/extensions/getset-003': [FAIL_OK],
+ 'js1_5/extensions/no-such-method': [FAIL_OK],
+ 'js1_5/extensions/regress-104077': [FAIL_OK],
+ 'js1_5/extensions/regress-226078': [FAIL_OK],
+ 'js1_5/extensions/regress-303277': [FAIL_OK],
+ 'js1_5/extensions/regress-304897': [FAIL_OK],
+ 'js1_5/extensions/regress-306738': [FAIL_OK],
+ 'js1_5/extensions/regress-311161': [FAIL_OK],
+ 'js1_5/extensions/regress-311583': [FAIL_OK],
+ 'js1_5/extensions/regress-311792-01': [FAIL_OK],
+ 'js1_5/extensions/regress-312278': [FAIL_OK],
+ 'js1_5/extensions/regress-313630': [FAIL_OK],
+ 'js1_5/extensions/regress-313763': [FAIL_OK],
+ 'js1_5/extensions/regress-313803': [FAIL_OK],
+ 'js1_5/extensions/regress-314874': [FAIL_OK],
+ 'js1_5/extensions/regress-322957': [FAIL_OK],
+ 'js1_5/extensions/regress-328556': [FAIL_OK],
+ 'js1_5/extensions/regress-333541': [FAIL_OK],
+ 'js1_5/extensions/regress-335700': [FAIL_OK],
+ 'js1_5/extensions/regress-336409-1': [FAIL_OK],
+ 'js1_5/extensions/regress-336409-2': [FAIL_OK],
+ 'js1_5/extensions/regress-336410-2': [FAIL_OK],
+ 'js1_5/extensions/regress-341956-01': [FAIL_OK],
+ 'js1_5/extensions/regress-345967': [FAIL_OK],
+ 'js1_5/extensions/regress-346494-01': [FAIL_OK],
+ 'js1_5/extensions/regress-346494': [FAIL_OK],
+ 'js1_5/extensions/regress-347306-02': [FAIL_OK],
+ 'js1_5/extensions/regress-348986': [FAIL_OK],
+ 'js1_5/extensions/regress-349616': [FAIL_OK],
+ 'js1_5/extensions/regress-350312-02': [FAIL_OK],
+ 'js1_5/extensions/regress-350312-03': [FAIL_OK],
+ 'js1_5/extensions/regress-350531': [FAIL_OK],
+ 'js1_5/extensions/regress-351102-01': [FAIL_OK],
+ 'js1_5/extensions/regress-351102-02': [FAIL_OK],
+ 'js1_5/extensions/regress-351102-06': [FAIL_OK],
+ 'js1_5/extensions/regress-351973': [FAIL_OK],
+ 'js1_5/extensions/regress-352060': [FAIL_OK],
+ 'js1_5/extensions/regress-352094': [FAIL_OK],
+ 'js1_5/extensions/regress-352261': [FAIL_OK],
+ 'js1_5/extensions/regress-352281': [FAIL_OK],
+ 'js1_5/extensions/regress-352455': [FAIL_OK],
+ 'js1_5/extensions/regress-352604': [FAIL_OK],
+ 'js1_5/extensions/regress-353214': [FAIL_OK],
+ 'js1_5/extensions/regress-355339': [FAIL_OK],
+ 'js1_5/extensions/regress-355497': [FAIL_OK],
+ 'js1_5/extensions/regress-355622': [FAIL_OK],
+ 'js1_5/extensions/regress-355736': [FAIL_OK],
+ 'js1_5/extensions/regress-356085': [FAIL_OK],
+ 'js1_5/extensions/regress-356106': [FAIL_OK],
+ 'js1_5/extensions/regress-358594-01': [FAIL_OK],
+ 'js1_5/extensions/regress-358594-02': [FAIL_OK],
+ 'js1_5/extensions/regress-358594-03': [FAIL_OK],
+ 'js1_5/extensions/regress-358594-04': [FAIL_OK],
+ 'js1_5/extensions/regress-358594-05': [FAIL_OK],
+ 'js1_5/extensions/regress-358594-06': [FAIL_OK],
+ 'js1_5/extensions/regress-361346': [FAIL_OK],
+ 'js1_5/extensions/regress-361360': [FAIL_OK],
+ 'js1_5/extensions/regress-361558': [FAIL_OK],
+ 'js1_5/extensions/regress-361571': [FAIL_OK],
+ 'js1_5/extensions/regress-361856': [FAIL_OK],
+ 'js1_5/extensions/regress-361964': [FAIL_OK],
+ 'js1_5/extensions/regress-363988': [FAIL_OK],
+ 'js1_5/extensions/regress-365869': [FAIL_OK],
+ 'js1_5/extensions/regress-367630': [FAIL_OK],
+ 'js1_5/extensions/regress-367923': [FAIL_OK],
+ 'js1_5/extensions/regress-368859': [FAIL_OK],
+ 'js1_5/extensions/regress-369696-01': [FAIL_OK],
+ 'js1_5/extensions/regress-369696-02': [FAIL_OK],
+ 'js1_5/extensions/regress-369696-03': [FAIL_OK],
+ 'js1_5/extensions/regress-374589': [FAIL_OK],
+ 'js1_5/extensions/regress-375801': [FAIL_OK],
+ 'js1_5/extensions/regress-376052': [FAIL_OK],
+ 'js1_5/extensions/regress-379523': [FAIL_OK],
+ 'js1_5/extensions/regress-380581': [FAIL_OK],
+ 'js1_5/extensions/regress-380831': [FAIL_OK],
+ 'js1_5/extensions/regress-381205': [FAIL_OK],
+ 'js1_5/extensions/regress-381211': [FAIL_OK],
+ 'js1_5/extensions/regress-381304': [FAIL_OK],
+ 'js1_5/extensions/regress-382509': [FAIL_OK],
+ 'js1_5/extensions/regress-383965': [FAIL_OK],
+ 'js1_5/extensions/regress-384680': [FAIL_OK],
+ 'js1_5/extensions/regress-385393-09': [FAIL_OK],
+ 'js1_5/extensions/regress-407501': [FAIL_OK],
+ 'js1_5/extensions/regress-418730': [FAIL_OK],
+ 'js1_5/extensions/regress-420612': [FAIL_OK],
+ 'js1_5/extensions/regress-420869-01': [FAIL_OK],
+ 'js1_5/extensions/regress-424257': [FAIL_OK],
+ 'js1_5/extensions/regress-424683-01': [FAIL_OK],
+ 'js1_5/extensions/regress-429739': [FAIL_OK],
+ 'js1_5/extensions/regress-454142': [FAIL_OK],
+ 'js1_5/extensions/regress-465145': [FAIL_OK],
+ 'js1_5/extensions/regress-469625': [FAIL_OK],
+ 'js1_5/extensions/regress-472787': [FAIL_OK],
+ 'js1_5/extensions/regress-44009': [FAIL_OK],
+ 'js1_5/extensions/regress-50447-1': [FAIL_OK],
+ 'js1_5/extensions/regress-50447': [FAIL_OK],
+ 'js1_5/extensions/regress-90596-001': [FAIL_OK],
+ 'js1_5/extensions/regress-90596-002': [FAIL_OK],
+ 'js1_5/extensions/regress-96284-001': [FAIL_OK],
+ 'js1_5/extensions/regress-96284-002': [FAIL_OK],
+ 'js1_5/extensions/toLocaleFormat-01': [FAIL_OK],
+ 'js1_5/extensions/toLocaleFormat-02': [FAIL_OK],
+
+ 'js1_5/extensions/regress-330569': [TIMEOUT],
+ 'js1_5/extensions/regress-351448': [TIMEOUT],
+ # In the 64-bit version, this test takes longer to run out of memory
+ # than it does in the 32-bit version when attempting to generate a huge
+ # error message in debug mode.
+ 'js1_5/extensions/regress-336410-1': [FAIL_OK, ['mode == debug and arch == x64', TIMEOUT]],
+
+ ##################### DECOMPILATION TESTS #####################
+
+ # We don't really care about the outcome of running the
+ # decompilation tests as long as they don't crash or
+ # time out.
+
+ 'js1_5/decompilation/regress-344120': [PASS, FAIL],
+ 'js1_5/decompilation/regress-346892': [PASS, FAIL],
+ 'js1_5/decompilation/regress-346902': [PASS, FAIL],
+ 'js1_5/decompilation/regress-346904': [PASS, FAIL],
+ 'js1_5/decompilation/regress-346915': [PASS, FAIL],
+ 'js1_5/decompilation/regress-349484': [PASS, FAIL],
+ 'js1_5/decompilation/regress-349489': [PASS, FAIL],
+ 'js1_5/decompilation/regress-349491': [PASS, FAIL],
+ 'js1_5/decompilation/regress-349596': [PASS, FAIL],
+ 'js1_5/decompilation/regress-349650': [PASS, FAIL],
+ 'js1_5/decompilation/regress-349663': [PASS, FAIL],
+ 'js1_5/decompilation/regress-350242': [PASS, FAIL],
+ 'js1_5/decompilation/regress-350263': [PASS, FAIL],
+ 'js1_5/decompilation/regress-350271': [PASS, FAIL],
+ 'js1_5/decompilation/regress-350666': [PASS, FAIL],
+ 'js1_5/decompilation/regress-350670': [PASS, FAIL],
+ 'js1_5/decompilation/regress-351104': [PASS, FAIL],
+ 'js1_5/decompilation/regress-351219': [PASS, FAIL],
+ 'js1_5/decompilation/regress-351336': [PASS, FAIL],
+ 'js1_5/decompilation/regress-351597': [PASS, FAIL],
+ 'js1_5/decompilation/regress-351625': [PASS, FAIL],
+ 'js1_5/decompilation/regress-351626': [PASS, FAIL],
+ 'js1_5/decompilation/regress-351693': [PASS, FAIL],
+ 'js1_5/decompilation/regress-351705': [PASS, FAIL],
+ 'js1_5/decompilation/regress-351793': [PASS, FAIL],
+ 'js1_5/decompilation/regress-352013': [PASS, FAIL],
+ 'js1_5/decompilation/regress-352022': [PASS, FAIL],
+ 'js1_5/decompilation/regress-352073': [PASS, FAIL],
+ 'js1_5/decompilation/regress-352202': [PASS, FAIL],
+ 'js1_5/decompilation/regress-352312': [PASS, FAIL],
+ 'js1_5/decompilation/regress-352360': [PASS, FAIL],
+ 'js1_5/decompilation/regress-352375': [PASS, FAIL],
+ 'js1_5/decompilation/regress-352453': [PASS, FAIL],
+ 'js1_5/decompilation/regress-352649': [PASS, FAIL],
+ 'js1_5/decompilation/regress-352873-01': [PASS, FAIL],
+ 'js1_5/decompilation/regress-352873-02': [PASS, FAIL],
+ 'js1_5/decompilation/regress-353000': [PASS, FAIL],
+ 'js1_5/decompilation/regress-353120': [PASS, FAIL],
+ 'js1_5/decompilation/regress-353146': [PASS, FAIL],
+ 'js1_5/decompilation/regress-354878': [PASS, FAIL],
+ 'js1_5/decompilation/regress-354910': [PASS, FAIL],
+ 'js1_5/decompilation/regress-355992': [PASS, FAIL],
+ 'js1_5/decompilation/regress-356083': [PASS, FAIL],
+ 'js1_5/decompilation/regress-356248': [PASS, FAIL],
+ 'js1_5/decompilation/regress-371692': [PASS, FAIL],
+ 'js1_5/decompilation/regress-373678': [PASS, FAIL],
+ 'js1_5/decompilation/regress-375639': [PASS, FAIL],
+ 'js1_5/decompilation/regress-375882': [PASS, FAIL],
+ 'js1_5/decompilation/regress-376564': [PASS, FAIL],
+ 'js1_5/decompilation/regress-383721': [PASS, FAIL],
+ 'js1_5/decompilation/regress-406555': [PASS, FAIL],
+ 'js1_5/decompilation/regress-460870': [PASS, FAIL],
+}], # ALWAYS
+
+
+['arch == arm', {
+
+ # BUG(3251229): Times out when running new crankshaft test script.
+ 'ecma_3/RegExp/regress-311414': [SKIP],
+ 'ecma/Date/15.9.5.8': [SKIP],
+ 'ecma/Date/15.9.5.10-2': [SKIP],
+ 'ecma/Date/15.9.5.11-2': [SKIP],
+ 'ecma/Date/15.9.5.12-2': [SKIP],
+ 'js1_5/Array/regress-99120-02': [SKIP],
+ 'js1_5/extensions/regress-371636': [SKIP],
+ 'js1_5/Regress/regress-203278-1': [SKIP],
+ 'js1_5/Regress/regress-404755': [SKIP],
+ 'js1_5/Regress/regress-451322': [SKIP],
+
+
+ # BUG(1040): Allow this test to timeout.
+ 'js1_5/GC/regress-203278-2': [PASS, TIMEOUT],
+}], # 'arch == arm'
+
+
+['arch == mipsel', {
+
+ # BUG(3251229): Times out when running new crankshaft test script.
+ 'ecma_3/RegExp/regress-311414': [SKIP],
+ 'ecma/Date/15.9.5.8': [SKIP],
+ 'ecma/Date/15.9.5.10-2': [SKIP],
+ 'ecma/Date/15.9.5.11-2': [SKIP],
+ 'ecma/Date/15.9.5.12-2': [SKIP],
+ 'js1_5/Array/regress-99120-02': [SKIP],
+ 'js1_5/extensions/regress-371636': [SKIP],
+ 'js1_5/Regress/regress-203278-1': [SKIP],
+ 'js1_5/Regress/regress-404755': [SKIP],
+ 'js1_5/Regress/regress-451322': [SKIP],
+
+
+ # BUG(1040): Allow this test to timeout.
+ 'js1_5/GC/regress-203278-2': [PASS, TIMEOUT],
+}], # 'arch == mipsel'
+]
diff --git a/deps/v8/test/preparser/preparser.status b/deps/v8/test/preparser/preparser.status
index 40c5caf742..9d69988f71 100644
--- a/deps/v8/test/preparser/preparser.status
+++ b/deps/v8/test/preparser/preparser.status
@@ -25,14 +25,20 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-prefix preparser
+[
+[ALWAYS, {
+ # TODO(mstarzinger): This script parses but throws a TypeError when run.
+ 'non-alphanum': [FAIL],
-# We don't parse RegExps at scanning time, so we can't fail on octal
-# escapes (we need to parse to distinguish octal escapes from valid
-# back-references).
-strict-octal-regexp: FAIL
+ # We don't parse RegExps at scanning time, so we can't fail on octal
+ # escapes (we need to parse to distinguish octal escapes from valid
+ # back-references).
+ 'strict-octal-regexp': [FAIL],
+}], # ALWAYS
-[ $arch == android_arm || $arch == android_ia32 ]
-# Remove this once the issue above is fixed. Android test runner does not
-# handle "FAIL" test expectation correctly.
-strict-octal-regexp: SKIP
+['arch == android_arm or arch == android_ia32', {
+ # Remove this once the issue above is fixed. The Android test runner does
+ # not handle the "FAIL" test expectation correctly.
+ 'strict-octal-regexp': [SKIP],
+}], # 'arch == android_arm or arch == android_ia32'
+]
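
The condition headers themselves switch from the old `[ $arch == android_arm || $arch == android_ia32 ]` sections to Python boolean expressions such as 'arch == android_arm or arch == android_ia32'. For simple lines the mechanical translation is straightforward; a rough converter for the most common shape might look like the following (an illustrative assumption, not the script that produced this diff):

    import re

    def convert(line):
        # Handles only the simplest old-style shape, e.g.
        #   ecma_3/RegExp/regress-85721: PASS || FAIL if $mode == debug
        # which becomes
        #   'ecma_3/RegExp/regress-85721': [PASS, ['mode == debug', FAIL]],
        m = re.match(r'^([^:]+):\s*(.+?)(?:\s+if\s+(.+))?$', line.strip())
        name, outcomes, guard = m.group(1), m.group(2), m.group(3)
        parts = [p.strip() for p in outcomes.split('||')]
        if guard:
            # Rewrite $vars and C-style operators into Python syntax; in
            # this shape the guard applies to the last alternative only.
            guard = guard.replace('$', '').replace('||', 'or').replace('&&', 'and')
            parts[-1] = "['%s', %s]" % (guard, parts[-1])
        return "'%s': [%s]," % (name, ', '.join(parts))

    print(convert('ecma_3/RegExp/regress-85721: PASS || FAIL if $mode == debug'))
    # -> 'ecma_3/RegExp/regress-85721': [PASS, ['mode == debug', FAIL]],

Lines with parenthesized alternatives, like the old `PASS || (FAIL || TIMEOUT if $mode == debug)`, fall outside this simple shape and were presumably converted by hand.
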
diff --git a/deps/v8/test/preparser/strict-identifiers.pyt b/deps/v8/test/preparser/strict-identifiers.pyt
index f979088689..446980f701 100644
--- a/deps/v8/test/preparser/strict-identifiers.pyt
+++ b/deps/v8/test/preparser/strict-identifiers.pyt
@@ -147,25 +147,25 @@ label_strict = StrictTemplate("label-strict-$id", """
""")
break_normal = Template("break-normal-$id", """
- for (;;) {
+ $id: for (;false;) {
break $id;
}
""")
break_strict = StrictTemplate("break-strict-$id", """
- for (;;) {
+ $id: for (;false;) {
break $id;
}
""")
continue_normal = Template("continue-normal-$id", """
- for (;;) {
+ $id: for (;false;) {
continue $id;
}
""")
continue_strict = StrictTemplate("continue-strict-$id", """
- for (;;) {
+ $id: for (;false;) {
continue $id;
}
""")
diff --git a/deps/v8/test/preparser/testcfg.py b/deps/v8/test/preparser/testcfg.py
index 566fd5ca44..850c0a4589 100644
--- a/deps/v8/test/preparser/testcfg.py
+++ b/deps/v8/test/preparser/testcfg.py
@@ -39,7 +39,7 @@ class PreparserTestSuite(testsuite.TestSuite):
super(PreparserTestSuite, self).__init__(name, root)
def shell(self):
- return "preparser"
+ return "d8"
def _GetExpectations(self):
expects_file = os.path.join(self.root, "preparser.expectation")
@@ -64,7 +64,7 @@ class PreparserTestSuite(testsuite.TestSuite):
testname = os.path.join(filename, name)
flags = ["-e", source]
if expectation:
- flags += ["throws", expectation]
+ flags += ["--throws"]
test = testcase.TestCase(self, testname, flags=flags)
result.append(test)
def Template(name, source):
@@ -89,7 +89,7 @@ class PreparserTestSuite(testsuite.TestSuite):
throws = expectations.get(f, None)
flags = [f + ".js"]
if throws:
- flags += ["throws", throws]
+ flags += ["--throws"]
test = testcase.TestCase(self, f, flags=flags)
result.append(test)
@@ -112,7 +112,7 @@ class PreparserTestSuite(testsuite.TestSuite):
with open(testcase.flags[0]) as f:
return f.read()
- def VariantFlags(self):
+ def VariantFlags(self, testcase, default_flags):
return [[]];
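
Two behavioural changes ride along with this testcfg.py update: the preparser suite now runs under the regular d8 shell instead of a dedicated preparser binary, and an expected exception is signalled with d8's --throws flag rather than the old positional 'throws <ExceptionName>' pair (so the specific exception type is no longer checked). The VariantFlags signature change mirrors the updated test-runner API, which now passes each testcase and its default flags to the suite. A rough sketch of the resulting invocation (the helper name and paths are assumptions for illustration):

    import subprocess

    def run_preparser_test(d8_path, source, expect_throws):
        # After this change a test is just d8 executing the source string;
        # --throws tells d8 that the script is expected to throw.
        flags = ['-e', source]
        if expect_throws:
            flags.append('--throws')
        return subprocess.call([d8_path] + flags)

    # e.g. run_preparser_test('out/Release/d8', 'var x = 1;', expect_throws=False)
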
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 4910939f15..e546266f3a 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -25,68 +25,97 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-prefix test262
-def FAIL_OK = FAIL, OKAY
+[
+[ALWAYS, {
+ ############################### BUGS ###################################
-############################### BUGS ###################################
+ # Sequencing of getter side effects on receiver and argument properties
+ # is wrong. The receiver callback should be called before any arguments
+ # are evaluated.
+ # V8 Bug: http://code.google.com/p/v8/issues/detail?id=691
+ '11.2.3-3_3': [FAIL],
-# Sequencing of getter side effects on receiver and argument properties
-# is wrong. The receiver callback should be called before any arguments
-# are evaluated.
-# V8 Bug: http://code.google.com/p/v8/issues/detail?id=691
-11.2.3-3_3: FAIL
+ '15.5.4.9_CE': [['no_i18n', SKIP]],
-# Strings that are considered canonically equivalent by the Unicode standard
-# return a non-zero value on String.prototype.localeCompare calls.
-# V8 Bug: http://code.google.com/p/v8/issues/detail?id=2413
-15.5.4.9_CE: FAIL
+ ######################## NEEDS INVESTIGATION ###########################
-##################### DELIBERATE INCOMPATIBILITIES #####################
+ # These test failures are specific to the intl402 suite and need investigation
+ # to be either marked as bugs with issues filed for them or as deliberate
+ # incompatibilities if the test cases turn out to be broken or ambiguous.
+ '6.2.3': [FAIL],
+ '9.2.1_2': [FAIL],
+ '9.2.5_11_g_ii_2': [FAIL],
+ '9.2.6_2': [FAIL],
+ '10.1.1_a': [FAIL],
+ '10.1.1_19_c': [PASS, FAIL],
+ '10.1.2.1_4': [FAIL],
+ '10.2.3_b': [PASS, FAIL],
+ '10.3_a': [FAIL],
+ '11.1.1_17': [PASS, FAIL],
+ '11.1.1_19': [PASS, FAIL],
+ '11.1.1_20_c': [FAIL],
+ '11.1.1_a': [FAIL],
+ '11.1.2.1_4': [FAIL],
+ '11.3.2_FN_2': [PASS, FAIL],
+ '11.3.2_TRF': [PASS, FAIL],
+ '11.3.2_TRP': [FAIL],
+ '11.3_a': [FAIL],
+ '12.1.1_a': [FAIL],
+ '12.1.2.1_4': [FAIL],
+ '12.3.2_FDT_7_a_iv': [FAIL],
+ '12.3.3': [FAIL],
+ '12.3_a': [FAIL],
+ '15.5.4.9_3': [PASS, FAIL],
-# This tests precision of Math functions. The implementation for those
-# trigonometric functions are platform/compiler dependent. Furthermore, the
-# expectation values by far deviates from the actual result given by an
-# arbitrary-precision calculator, making those tests partly bogus.
-S15.8.2.8_A6: PASS || FAIL_OK # Math.exp (less precise with --fast-math)
-S15.8.2.16_A7: PASS || FAIL_OK # Math.sin
-S15.8.2.18_A7: PASS || FAIL_OK # Math.tan
+ ##################### DELIBERATE INCOMPATIBILITIES #####################
-# Linux for ia32 (and therefore simulators) default to extended 80 bit floating
-# point formats, so these tests checking 64-bit FP precision fail. The other
-# platforms/arch's pass these tests.
-# We follow the other major JS engines by keeping this default.
-S8.5_A2.1: PASS || FAIL_OK
-S8.5_A2.2: PASS || FAIL_OK
+ # These tests check the precision of Math functions. The implementations
+ # of those trigonometric functions are platform/compiler dependent.
+ # Furthermore, the expected values deviate considerably from the results
+ # given by an arbitrary-precision calculator, making those tests partly
+ # bogus.
+ 'S15.8.2.8_A6': [PASS, FAIL_OK], # Math.exp (less precise with --fast-math)
+ 'S15.8.2.16_A7': [PASS, FAIL_OK], # Math.sin
+ 'S15.8.2.18_A7': [PASS, FAIL_OK], # Math.tan
-############################ INVALID TESTS #############################
+ # Linux for ia32 (and therefore simulators) default to extended 80 bit
+ # floating point formats, so these tests checking 64-bit FP precision fail.
+ # The other platforms/arch's pass these tests.
+ # We follow the other major JS engines by keeping this default.
+ 'S8.5_A2.1': [PASS, FAIL_OK],
+ 'S8.5_A2.2': [PASS, FAIL_OK],
-# The reference value calculated by Test262 is incorrect if you run these tests
-# in PST/PDT between first Sunday in March and first Sunday in April. The DST
-# switch was moved in 2007 whereas Test262 bases the reference value on 2000.
-# Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=293
-S15.9.3.1_A5_T1: PASS || FAIL_OK
-S15.9.3.1_A5_T2: PASS || FAIL_OK
-S15.9.3.1_A5_T3: PASS || FAIL_OK
-S15.9.3.1_A5_T4: PASS || FAIL_OK
-S15.9.3.1_A5_T5: PASS || FAIL_OK
-S15.9.3.1_A5_T6: PASS || FAIL_OK
+ ############################ INVALID TESTS #############################
-############################ SKIPPED TESTS #############################
+ # The reference value calculated by Test262 is incorrect if you run these
+ # tests in PST/PDT between first Sunday in March and first Sunday in April.
+ # The DST switch was moved in 2007 whereas Test262 bases the reference value
+ # on 2000. Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=293
+ 'S15.9.3.1_A5_T1': [PASS, FAIL_OK],
+ 'S15.9.3.1_A5_T2': [PASS, FAIL_OK],
+ 'S15.9.3.1_A5_T3': [PASS, FAIL_OK],
+ 'S15.9.3.1_A5_T4': [PASS, FAIL_OK],
+ 'S15.9.3.1_A5_T5': [PASS, FAIL_OK],
+ 'S15.9.3.1_A5_T6': [PASS, FAIL_OK],
-# These tests take a looong time to run in debug mode.
-S15.1.3.1_A2.5_T1: PASS, SKIP if $mode == debug
-S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug
+ ############################ SKIPPED TESTS #############################
-[ $arch == arm || $arch == mipsel ]
+ # These tests take a looong time to run in debug mode.
+ 'S15.1.3.1_A2.5_T1': [PASS, ['mode == debug', SKIP]],
+ 'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
+}], # ALWAYS
-# TODO(mstarzinger): Causes stack overflow on simulators due to eager
-# compilation of parenthesized function literals. Needs investigation.
-S13.2.1_A1_T1: SKIP
+['arch == arm or arch == mipsel', {
-# BUG(3251225): Tests that timeout with --nocrankshaft.
-S15.1.3.1_A2.4_T1: SKIP
-S15.1.3.1_A2.5_T1: SKIP
-S15.1.3.2_A2.4_T1: SKIP
-S15.1.3.2_A2.5_T1: SKIP
-S15.1.3.3_A2.3_T1: SKIP
-S15.1.3.4_A2.3_T1: SKIP
+ # TODO(mstarzinger): Causes stack overflow on simulators due to eager
+ # compilation of parenthesized function literals. Needs investigation.
+ 'S13.2.1_A1_T1': [SKIP],
+
+ # BUG(3251225): Tests that timeout with --nocrankshaft.
+ 'S15.1.3.1_A2.4_T1': [SKIP],
+ 'S15.1.3.1_A2.5_T1': [SKIP],
+ 'S15.1.3.2_A2.4_T1': [SKIP],
+ 'S15.1.3.2_A2.5_T1': [SKIP],
+ 'S15.1.3.3_A2.3_T1': [SKIP],
+ 'S15.1.3.4_A2.3_T1': [SKIP],
+}], # 'arch == arm or arch == mipsel'
+]
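
Note: the hunks above convert test262.status from the old rule syntax to the new one, in which the whole file is a Python literal: a list of [condition, rules] sections. A minimal sketch of how such a structure can be evaluated, with the keywords and variables that the real runner injects stubbed out by hand (nested per-outcome conditions such as ['mode == debug', SKIP] are left out for brevity):

    FAIL, PASS, SKIP, FAIL_OK = "FAIL", "PASS", "SKIP", "FAIL_OK"

    status = [
        ["ALWAYS", {
            '11.2.3-3_3': [FAIL],
            'S15.8.2.8_A6': [PASS, FAIL_OK],
        }],
        ["arch == arm or arch == mipsel", {
            'S13.2.1_A1_T1': [SKIP],
        }],
    ]

    # Arch and mode names map to themselves, so section conditions can be
    # written as bare keywords (see the statusfile.py hunks further down).
    variables = {"ALWAYS": True, "mode": "release",
                 "arch": "arm", "arm": "arm", "mipsel": "mipsel"}

    outcomes = {}
    for condition, rules in status:
        if eval(condition, {}, variables):
            for test, value in rules.items():
                outcomes.setdefault(test, []).extend(value)

    assert outcomes['S13.2.1_A1_T1'] == [SKIP]   # section matched on arm
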
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index fc03504dca..89f729d9a3 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -39,8 +39,7 @@ from testrunner.objects import testcase
TEST_262_ARCHIVE_REVISION = "99aac3bc1cad" # This is the r365 revision.
TEST_262_ARCHIVE_MD5 = "aadbd720ce9bdb4f8f3de066f4d7eea1"
TEST_262_URL = "http://hg.ecmascript.org/tests/test262/archive/%s.tar.bz2"
-TEST_262_HARNESS = ["sta.js", "testBuiltInObject.js"]
-TEST_262_SKIP = ["intl402"]
+TEST_262_HARNESS = ["sta.js", "testBuiltInObject.js", "testIntl.js"]
class Test262TestSuite(testsuite.TestSuite):
@@ -60,8 +59,8 @@ class Test262TestSuite(testsuite.TestSuite):
for dirname, dirs, files in os.walk(self.testroot):
for dotted in [x for x in dirs if x.startswith(".")]:
dirs.remove(dotted)
- for skipped in [x for x in dirs if x in TEST_262_SKIP]:
- dirs.remove(skipped)
+ if context.noi18n and "intl402" in dirs:
+ dirs.remove("intl402")
dirs.sort()
files.sort()
for filename in files:
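
Note: rather than skipping intl402 unconditionally, the walk above now prunes it only when the runner reports i18n support as disabled. The trick is that mutating the dirs list in place steers os.walk; a small self-contained sketch:

    import os

    def list_tests(testroot, noi18n):
        tests = []
        for dirname, dirs, files in os.walk(testroot):
            # Removing an entry from `dirs` stops os.walk from descending
            # into that directory on later iterations.
            if noi18n and "intl402" in dirs:
                dirs.remove("intl402")
            tests.extend(os.path.join(dirname, f) for f in files)
        return sorted(tests)
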
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index 4aaf8a97fb..eba1be3f0f 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -25,11 +25,12 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# Too slow in debug mode.
-dfg-int-overflow-in-loop: PASS, SKIP if $mode == debug
-dfg-double-vote-fuzz: PASS, SKIP if $mode == debug
-reentrant-caching: PASS, SKIP if $mode == debug
-sort-large-array: PASS, SKIP if $mode == debug
-
-##############################################################################
-[ $deopt_fuzzer == True ]
+[
+['mode == debug', {
+ # Too slow in debug mode.
+ 'dfg-int-overflow-in-loop': [SKIP],
+ 'dfg-double-vote-fuzz': [SKIP],
+ 'reentrant-caching': [SKIP],
+ 'sort-large-array': [SKIP],
+}], # 'mode == debug'
+]
diff --git a/deps/v8/tools/android-sync.sh b/deps/v8/tools/android-sync.sh
index 5d4ef2effd..460e92d2a3 100755
--- a/deps/v8/tools/android-sync.sh
+++ b/deps/v8/tools/android-sync.sh
@@ -88,7 +88,6 @@ function sync_dir {
echo -n "sync to $ANDROID_V8/$OUTDIR/$ARCH_MODE"
sync_file "$OUTDIR/$ARCH_MODE/cctest"
sync_file "$OUTDIR/$ARCH_MODE/d8"
-sync_file "$OUTDIR/$ARCH_MODE/preparser"
echo ""
echo -n "sync to $ANDROID_V8/tools"
sync_file tools/consarray.js
@@ -100,6 +99,8 @@ sync_file tools/profile_view.js
sync_file tools/logreader.js
sync_file tools/tickprocessor.js
echo ""
+sync_dir tools/profviz
+sync_dir test/intl
sync_dir test/message
sync_dir test/mjsunit
sync_dir test/preparser
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 0acb658c53..28377273ba 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -68,6 +68,7 @@ consts_misc = [
{ 'name': 'SeqStringTag', 'value': 'kSeqStringTag' },
{ 'name': 'ConsStringTag', 'value': 'kConsStringTag' },
{ 'name': 'ExternalStringTag', 'value': 'kExternalStringTag' },
+ { 'name': 'SlicedStringTag', 'value': 'kSlicedStringTag' },
{ 'name': 'FailureTag', 'value': 'kFailureTag' },
{ 'name': 'FailureTagMask', 'value': 'kFailureTagMask' },
@@ -88,6 +89,15 @@ consts_misc = [
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
+ { 'name': 'prop_desc_key',
+ 'value': 'DescriptorArray::kDescriptorKey' },
+ { 'name': 'prop_desc_details',
+ 'value': 'DescriptorArray::kDescriptorDetails' },
+ { 'name': 'prop_desc_value',
+ 'value': 'DescriptorArray::kDescriptorValue' },
+ { 'name': 'prop_desc_size',
+ 'value': 'DescriptorArray::kDescriptorSize' },
+
{ 'name': 'off_fp_context',
'value': 'StandardFrameConstants::kContextOffset' },
{ 'name': 'off_fp_marker',
@@ -113,7 +123,9 @@ extras_accessors = [
'ConsString, second, String, kSecondOffset',
'ExternalString, resource, Object, kResourceOffset',
'SeqOneByteString, chars, char, kHeaderSize',
+ 'SeqTwoByteString, chars, char, kHeaderSize',
'SharedFunctionInfo, code, Code, kCodeOffset',
+ 'SlicedString, parent, String, kParentOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
];
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 317a7d6a91..d09c042204 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -1029,7 +1029,8 @@ class Map(HeapObject):
class String(HeapObject):
def LengthOffset(self):
- return self.heap.PointerSize()
+ # First word after the map is the hash, the second is the length.
+ return self.heap.PointerSize() * 2
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
@@ -1215,18 +1216,18 @@ class DescriptorArray(object):
def Deleted(self, value):
return self.Decode(6, 1, value) == 1
- def Storage(self, value):
- return self.Decode(7, 11, value)
+ def FieldIndex(self, value):
+ return self.Decode(20, 11, value)
def Pointer(self, value):
- return self.Decode(18, 11, value)
+ return self.Decode(6, 11, value)
def Details(self, di, value):
return (
di,
self.Type(value),
self.Attributes(value),
- self.Storage(value),
+ self.FieldIndex(value),
self.Pointer(value)
)
@@ -1242,7 +1243,7 @@ class DescriptorArray(object):
i = 2 + di * 3
p.Print("0x%x" % (array.address + array.MemberOffset(i)))
p.Print("[%i] name: %s" % (di, array.Get(i + 0)))
- p.Print("[%i] details: %s %s enum %i pointer %i" % \
+ p.Print("[%i] details: %s %s field-index %i pointer %i" % \
self.Details(di, array.Get(i + 1)))
p.Print("[%i] value: %s" % (di, array.Get(i + 2)))
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index aa01a842f6..f3097ef7d5 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -58,6 +58,7 @@
['component=="shared_library"', {
'type': '<(component)',
'sources': [
+ '../../src/defaults.cc',
# Note: on non-Windows we still build this file so that gyp
# has some sources to link into the component.
'../../src/v8dll-main.cc',
@@ -208,6 +209,10 @@
'../../src/accessors.h',
'../../src/allocation.cc',
'../../src/allocation.h',
+ '../../src/allocation-site-scopes.cc',
+ '../../src/allocation-site-scopes.h',
+ '../../src/allocation-tracker.cc',
+ '../../src/allocation-tracker.h',
'../../src/api.cc',
'../../src/api.h',
'../../src/apiutils.h',
@@ -333,16 +338,17 @@
'../../src/hydrogen-bch.h',
'../../src/hydrogen-canonicalize.cc',
'../../src/hydrogen-canonicalize.h',
+ '../../src/hydrogen-check-elimination.cc',
+ '../../src/hydrogen-check-elimination.h',
'../../src/hydrogen-dce.cc',
'../../src/hydrogen-dce.h',
'../../src/hydrogen-dehoist.cc',
'../../src/hydrogen-dehoist.h',
- '../../src/hydrogen-deoptimizing-mark.cc',
- '../../src/hydrogen-deoptimizing-mark.h',
'../../src/hydrogen-environment-liveness.cc',
'../../src/hydrogen-environment-liveness.h',
'../../src/hydrogen-escape-analysis.cc',
'../../src/hydrogen-escape-analysis.h',
+ '../../src/hydrogen-flow-engine.h',
'../../src/hydrogen-instructions.cc',
'../../src/hydrogen-instructions.h',
'../../src/hydrogen.cc',
@@ -353,8 +359,12 @@
'../../src/hydrogen-infer-representation.h',
'../../src/hydrogen-infer-types.cc',
'../../src/hydrogen-infer-types.h',
+ '../../src/hydrogen-load-elimination.cc',
+ '../../src/hydrogen-load-elimination.h',
'../../src/hydrogen-mark-deoptimize.cc',
'../../src/hydrogen-mark-deoptimize.h',
+ '../../src/hydrogen-mark-unreachable.cc',
+ '../../src/hydrogen-mark-unreachable.h',
'../../src/hydrogen-minus-zero.cc',
'../../src/hydrogen-minus-zero.h',
'../../src/hydrogen-osr.cc',
@@ -397,6 +407,8 @@
'../../src/lithium-allocator-inl.h',
'../../src/lithium-allocator.cc',
'../../src/lithium-allocator.h',
+ '../../src/lithium-codegen.cc',
+ '../../src/lithium-codegen.h',
'../../src/lithium.cc',
'../../src/lithium.h',
'../../src/liveedit.cc',
@@ -409,8 +421,6 @@
'../../src/macro-assembler.h',
'../../src/mark-compact.cc',
'../../src/mark-compact.h',
- '../../src/marking-thread.h',
- '../../src/marking-thread.cc',
'../../src/messages.cc',
'../../src/messages.h',
'../../src/natives.h',
@@ -430,7 +440,6 @@
'../../src/platform/elapsed-timer.h',
'../../src/platform/time.cc',
'../../src/platform/time.h',
- '../../src/platform-posix.h',
'../../src/platform.h',
'../../src/platform/condition-variable.cc',
'../../src/platform/condition-variable.h',
@@ -804,6 +813,9 @@
]},
],
['OS=="win"', {
+ 'defines': [
+ '_CRT_RAND_S' # for rand_s()
+ ],
'variables': {
'gyp_generators': '<!(echo $GYP_GENERATORS)',
},
@@ -847,6 +859,10 @@
'BUILDING_V8_SHARED',
'V8_SHARED',
],
+ }, {
+ 'sources': [
+ '../../src/defaults.cc',
+ ],
}],
['v8_postmortem_support=="true"', {
'sources': [
@@ -855,8 +871,8 @@
}],
['v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
- '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
]
}, { # v8_enable_i18n_support==0
'sources!': [
@@ -866,7 +882,7 @@
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+ '<(icu_gyp_path):icudata',
],
}],
],
@@ -922,6 +938,7 @@
'../../src/array-iterator.js',
'../../src/harmony-string.js',
'../../src/harmony-array.js',
+ '../../src/harmony-math.js'
],
},
'actions': [
diff --git a/deps/v8/tools/merge-to-branch.sh b/deps/v8/tools/merge-to-branch.sh
index e0011edff0..260dc8a149 100755
--- a/deps/v8/tools/merge-to-branch.sh
+++ b/deps/v8/tools/merge-to-branch.sh
@@ -229,7 +229,8 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
git checkout $BRANCHNAME \
|| die "cannot ensure that the current branch is $BRANCHNAME"
wait_for_lgtm
- git cl dcommit || die "failed to commit to $MERGE_TO_BRANCH"
+ PRESUBMIT_TREE_CHECK="skip" git cl dcommit \
+ || die "failed to commit to $MERGE_TO_BRANCH"
fi
let CURRENT_STEP+=1
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 12475b33c4..1ab6347774 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -226,7 +226,7 @@ class CppLintProcessor(SourceFileProcessor):
or (name in CppLintProcessor.IGNORE_LINT))
def GetPathsToSearch(self):
- return ['src', 'preparser', 'include', 'samples', join('test', 'cctest')]
+ return ['src', 'include', 'samples', join('test', 'cctest')]
def GetCpplintScript(self, prio_path):
for path in [prio_path] + os.environ["PATH"].split(os.pathsep):
@@ -282,8 +282,8 @@ class SourceProcessor(SourceFileProcessor):
Check that all files include a copyright notice and no trailing whitespace.
"""
- RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', 'SConscript',
- 'SConstruct', '.status', '.gyp', '.gypi']
+ RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c',
+ '.status', '.gyp', '.gypi']
# Overwriting the one in the parent class.
def FindFilesIn(self, path):
@@ -292,7 +292,7 @@ class SourceProcessor(SourceFileProcessor):
stdout=PIPE, cwd=path, shell=True)
result = []
for file in output.stdout.read().split():
- for dir_part in os.path.dirname(file).split(os.sep):
+ for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
if self.IgnoreDir(dir_part):
break
else:
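
Note: the last hunk fixes a Windows-only blind spot. The file list comes from git, which always prints forward slashes, so splitting on os.sep (which is '\\' on Windows) never separated the directory components there, and IgnoreDir was consulted with the whole path. A sketch of the difference, with the Windows separator hardcoded so it runs anywhere:

    import os

    path = "test/preparser/strict-mode.js"   # git output always uses '/'
    # Old behaviour on Windows: splitting on '\\' yields one giant component.
    old_parts = os.path.dirname(path).split('\\')                     # ['test/preparser']
    # New behaviour: normalize the separator first, then split.
    new_parts = os.path.dirname(path).replace('\\', '/').split('/')   # ['test', 'preparser']
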
diff --git a/deps/v8/tools/profviz/composer.js b/deps/v8/tools/profviz/composer.js
index 44dd7639de..0c9437ff54 100644
--- a/deps/v8/tools/profviz/composer.js
+++ b/deps/v8/tools/profviz/composer.js
@@ -497,6 +497,8 @@ function PlotScriptComposer(kResX, kResY, error_output) {
}
// Label the longest pauses.
+ execution_pauses =
+ RestrictRangesTo(execution_pauses, range_start, range_end);
execution_pauses.sort(
function(a, b) { return b.duration() - a.duration(); });
diff --git a/deps/v8/tools/push-to-trunk.sh b/deps/v8/tools/push-to-trunk.sh
index 8512d12877..c91cd19f9a 100755
--- a/deps/v8/tools/push-to-trunk.sh
+++ b/deps/v8/tools/push-to-trunk.sh
@@ -211,7 +211,8 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
};
print $0;
}' > "$CHANGELOG_ENTRY_FILE"
- git cl dcommit || die "'git cl dcommit' failed, please try again."
+ PRESUBMIT_TREE_CHECK="skip" git cl dcommit \
+ || die "'git cl dcommit' failed, please try again."
fi
let CURRENT_STEP+=1
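
Note: both release scripts now prefix git cl dcommit with PRESUBMIT_TREE_CHECK="skip" so that a closed tree does not block the commit. The bash idiom sets the variable for that single command only; an equivalent sketch in Python, for comparison:

    import os
    import subprocess

    # Run one command with an extra environment variable, leaving the
    # caller's environment untouched (mirrors VAR=value cmd in bash).
    env = dict(os.environ, PRESUBMIT_TREE_CHECK="skip")
    subprocess.check_call(["git", "cl", "dcommit"], env=env)
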
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
index f8cc93748a..292cf002f9 100755
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ b/deps/v8/tools/run-deopt-fuzzer.py
@@ -358,7 +358,8 @@ def Execute(arch, mode, args, options, suites, workspace):
mode_flags, options.verbose,
timeout, options.isolates,
options.command_prefix,
- options.extra_flags)
+ options.extra_flags,
+ False)
# Find available test suites and read test cases from them.
variables = {
@@ -367,6 +368,7 @@ def Execute(arch, mode, args, options, suites, workspace):
"system": utils.GuessOS(),
"isolates": options.isolates,
"deopt_fuzzer": True,
+ "no_i18n": False,
}
all_tests = []
num_tests = 0
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index 48682d4444..2fdbeb9d65 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -112,6 +112,9 @@ def BuildOptions():
result.add_option("-m", "--mode",
help="The test modes in which to run (comma-separated)",
default="release,debug")
+ result.add_option("--no-i18n", "--noi18n",
+ help="Skip internationalization tests",
+ default=False, action="store_true")
result.add_option("--no-network", "--nonetwork",
help="Don't distribute tests on the network",
default=(utils.GuessOS() != "linux"),
@@ -122,6 +125,9 @@ def BuildOptions():
result.add_option("--no-stress", "--nostress",
help="Don't run crankshaft --always-opt --stress-op test",
default=False, dest="no_stress", action="store_true")
+ result.add_option("--no-variants", "--novariants",
+ help="Don't run any testing variants",
+ default=False, dest="no_variants", action="store_true")
result.add_option("--outdir", help="Base directory with compile output",
default="out")
result.add_option("-p", "--progress",
@@ -194,8 +200,18 @@ def ProcessOptions(options):
options.extra_flags = shlex.split(options.extra_flags)
if options.j == 0:
options.j = multiprocessing.cpu_count()
+
+ def excl(*args):
+ """Returns true if zero or one of multiple arguments are true."""
+ return reduce(lambda x, y: x + y, args) <= 1
+
+ if not excl(options.no_stress, options.stress_only, options.no_variants):
+ print "Use only one of --no-stress, --stress-only or --no-variants."
+ return False
if options.no_stress:
VARIANT_FLAGS = [[], ["--nocrankshaft"]]
+ if options.no_variants:
+ VARIANT_FLAGS = [[]]
if not options.shell_dir:
if options.shell:
print "Warning: --shell is deprecated, use --shell-dir instead."
@@ -210,6 +226,8 @@ def ProcessOptions(options):
if not options.flaky_tests in ["run", "skip", "dontcare"]:
print "Unknown flaky test mode %s" % options.flaky_tests
return False
+ if not options.no_i18n:
+ DEFAULT_TESTS.append("intl")
return True
@@ -302,7 +320,8 @@ def Execute(arch, mode, args, options, suites, workspace):
mode_flags, options.verbose,
timeout, options.isolates,
options.command_prefix,
- options.extra_flags)
+ options.extra_flags,
+ options.no_i18n)
# Find available test suites and read test cases from them.
variables = {
@@ -311,6 +330,7 @@ def Execute(arch, mode, args, options, suites, workspace):
"system": utils.GuessOS(),
"isolates": options.isolates,
"deopt_fuzzer": False,
+ "no_i18n": options.no_i18n,
}
all_tests = []
num_tests = 0
@@ -325,8 +345,9 @@ def Execute(arch, mode, args, options, suites, workspace):
if options.cat:
verbose.PrintTestSource(s.tests)
continue
- variant_flags = s.VariantFlags() or VARIANT_FLAGS
- s.tests = [ t.CopyAddingFlags(v) for t in s.tests for v in variant_flags ]
+ s.tests = [ t.CopyAddingFlags(v)
+ for t in s.tests
+ for v in s.VariantFlags(t, VARIANT_FLAGS) ]
s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
num_tests += len(s.tests)
for t in s.tests:
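
Note: the new --no-variants switch joins --no-stress and --stress-only as mutually exclusive ways of narrowing the variant set, and excl() enforces that at most one of them is set by summing booleans. A condensed sketch of the selection logic; the default flag list shown is illustrative, not necessarily the runner's exact default:

    def excl(*args):
        # True counts as 1, so the sum is the number of flags set.
        return sum(args) <= 1

    def pick_variant_flags(no_stress, no_variants):
        variant_flags = [[], ["--stress-opt", "--always-opt"], ["--nocrankshaft"]]
        if no_stress:
            variant_flags = [[], ["--nocrankshaft"]]
        if no_variants:
            variant_flags = [[]]
        return variant_flags

    assert excl(False, True, False)
    assert not excl(True, True, False)
    assert pick_variant_flags(no_stress=False, no_variants=True) == [[]]
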
diff --git a/deps/v8/tools/sodium/index.html b/deps/v8/tools/sodium/index.html
new file mode 100644
index 0000000000..cbfe49902d
--- /dev/null
+++ b/deps/v8/tools/sodium/index.html
@@ -0,0 +1,36 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>Sodium</title>
+ <meta charset="utf-8">
+ <link href="styles.css" rel="stylesheet" type="text/css">
+ </head>
+ <script src="https://google-code-prettify.googlecode.com/svn/loader/run_prettify.js"></script>
+ <script src="./sodium.js"></script>
+ <script type="text/javascript"></script>
+ <body>
+ <table style='top:5px; width:100%;'>
+ <tr><td id='table-header'>
+ <input type='file' id='log-file-id' />
+ <select id="kind-selector-id" onchange="Sodium.kindChangedHandler(this);"></select><br>
+ <select id="function-selector-id" onchange="Sodium.functionChangedHandler();"></select>
+ </td></tr>
+ <tr>
+ <table style='height:90%;'>
+ <tr>
+ <td id='asm-container'>
+ <div id='asm-text'></div>
+ </td>
+ <td id='source-container'>
+ <div id='source-text'><pre id='source-text-pre'/></div>
+ </td>
+ </tr>
+ </table>
+ </tr>
+ </table>
+ <script>
+ Sodium.buildFunctionKindSelector(document.getElementById('kind-selector-id'));
+ document.getElementById('log-file-id').addEventListener('change', Sodium.readLog, false);
+ </script>
+ </body>
+</html>
diff --git a/deps/v8/tools/sodium/sodium.js b/deps/v8/tools/sodium/sodium.js
new file mode 100644
index 0000000000..44475a177f
--- /dev/null
+++ b/deps/v8/tools/sodium/sodium.js
@@ -0,0 +1,409 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var Sodium = (function() {
+ "use strict";
+
+ var kinds = ["FUNCTION", "OPTIMIZED_FUNCTION", "STUB", "BUILTIN",
+ "LOAD_IC", "KEYED_LOAD_IC", "CALL_IC", "KEYED_CALL_IC",
+ "STORE_IC", "KEYED_STORE_IC", "BINARY_OP_IC", "COMPARE_IC",
+ "COMPARE_NIL_IC", "TO_BOOLEAN_IC"];
+ var kindsWithSource = {
+ 'FUNCTION': true,
+ 'OPTIMIZED_FUNCTION': true
+ };
+
+ var addressRegEx = "0x[0-9a-f]{8,16}";
+ var nameFinder = new RegExp("^name = (.+)$");
+ var kindFinder = new RegExp("^kind = (.+)$");
+ var firstPositionFinder = new RegExp("^source_position = (\\d+)$");
+ var separatorFilter = new RegExp("^--- (.)+ ---$");
+ var rawSourceFilter = new RegExp("^--- Raw source ---$");
+ var codeEndFinder = new RegExp("^--- End code ---$");
+ var whiteSpaceLineFinder = new RegExp("^\\W*$");
+ var instructionBeginFinder =
+ new RegExp("^Instructions\\W+\\(size = \\d+\\)");
+ var instructionFinder =
+ new RegExp("^\(" + addressRegEx + "\)\(\\W+\\d+\\W+.+\)");
+ var positionFinder =
+ new RegExp("^(" + addressRegEx + ")\\W+position\\W+\\((\\d+)\\)");
+ var addressFinder = new RegExp("\(" + addressRegEx + "\)");
+ var addressReplacer = new RegExp("\(" + addressRegEx + "\)", "gi");
+
+ var fileContent = "";
+ var selectedFunctionKind = "";
+ var currentFunctionKind = "";
+
+ var currentFunctionName = "";
+ var firstSourcePosition = 0;
+ var startAddress = "";
+ var readingSource = false;
+ var readingAsm = false;
+ var sourceBegin = -1;
+ var sourceEnd = -1;
+ var asmBegin = -1;
+ var asmEnd = -1;
+ var codeObjects = [];
+ var selectedAsm = null;
+ var selectedSource = null;
+ var selectedSourceClass = "";
+
+ function Code(name, kind, sourceBegin, sourceEnd, asmBegin, asmEnd,
+ firstSourcePosition, startAddress) {
+ this.name = name;
+ this.kind = kind;
+ this.sourceBegin = sourceBegin;
+ this.sourceEnd = sourceEnd;
+ this.asmBegin = asmBegin;
+ this.asmEnd = asmEnd;
+ this.firstSourcePosition = firstSourcePosition;
+ this.startAddress = startAddress;
+ }
+
+ function getCurrentCodeObject() {
+ var functionSelect = document.getElementById('function-selector-id');
+ return functionSelect.options[functionSelect.selectedIndex].codeObject;
+ }
+
+ function getCurrentSourceText() {
+ var code = getCurrentCodeObject();
+ if (code.sourceBegin == -1 || code.sourceEnd == -1) return "";
+ return fileContent.substring(code.sourceBegin, code.sourceEnd);
+ }
+
+ function getCurrentAsmText() {
+ var code = getCurrentCodeObject();
+ if (code.asmBegin == -1 || code.asmEnd == -1) return "";
+ return fileContent.substring(code.asmBegin, code.asmEnd);
+ }
+
+ function setKindByIndex(index) {
+ selectedFunctionKind = kinds[index];
+ }
+
+ function processLine(text, begin, end) {
+ var line = text.substring(begin, end);
+ if (readingSource) {
+ if (separatorFilter.exec(line) != null) {
+ readingSource = false;
+ } else {
+ if (sourceBegin == -1) {
+ sourceBegin = begin;
+ }
+ sourceEnd = end;
+ }
+ } else {
+ if (readingAsm) {
+ if (codeEndFinder.exec(line) != null) {
+ readingAsm = false;
+ asmEnd = begin;
+ var newCode =
+ new Code(currentFunctionName, currentFunctionKind,
+ sourceBegin, sourceEnd, asmBegin, asmEnd,
+ firstSourcePosition, startAddress);
+ codeObjects.push(newCode);
+ currentFunctionKind = null;
+ } else {
+ if (asmBegin == -1) {
+ matches = instructionBeginFinder.exec(line);
+ if (matches != null) {
+ asmBegin = begin;
+ }
+ }
+ if (startAddress == "") {
+ matches = instructionFinder.exec(line);
+ if (matches != null) {
+ startAddress = matches[1];
+ }
+ }
+ }
+ } else {
+ var matches = kindFinder.exec(line);
+ if (matches != null) {
+ currentFunctionKind = matches[1];
+ if (!kindsWithSource[currentFunctionKind]) {
+ sourceBegin = -1;
+ sourceEnd = -1;
+ }
+ } else if (currentFunctionKind != null) {
+ matches = nameFinder.exec(line);
+ if (matches != null) {
+ readingAsm = true;
+ asmBegin = -1;
+ currentFunctionName = matches[1];
+ }
+ } else if (rawSourceFilter.exec(line) != null) {
+ readingSource = true;
+ sourceBegin = -1;
+ } else {
+ var matches = firstPositionFinder.exec(line);
+ if (matches != null) {
+ firstSourcePosition = parseInt(matches[1]);
+ }
+ }
+ }
+ }
+ }
+
+ function processLines(source, size, processLine) {
+ var firstChar = 0;
+ for (var x = 0; x < size; x++) {
+ var curChar = source[x];
+ if (curChar == '\n' || curChar == '\r') {
+ processLine(source, firstChar, x);
+ firstChar = x + 1;
+ }
+ }
+ if (firstChar != size - 1) {
+ processLine(source, firstChar, size - 1);
+ }
+ }
+
+ function processFileContent() {
+ document.getElementById('source-text-pre').innerHTML = '';
+ sourceBegin = -1;
+ codeObjects = [];
+ processLines(fileContent, fileContent.length, processLine);
+ var functionSelectElement = document.getElementById('function-selector-id');
+ functionSelectElement.innerHTML = '';
+ var length = codeObjects.length;
+ for (var i = 0; i < codeObjects.length; ++i) {
+ var code = codeObjects[i];
+ if (code.kind == selectedFunctionKind) {
+ var optionElement = document.createElement("option");
+ optionElement.codeObject = code;
+ optionElement.text = code.name;
+ functionSelectElement.add(optionElement, null);
+ }
+ }
+ }
+
+ function asmClick(element) {
+ if (element == selectedAsm) return;
+ if (selectedAsm != null) {
+ selectedAsm.classList.remove('highlight-yellow');
+ }
+ selectedAsm = element;
+ selectedAsm.classList.add('highlight-yellow');
+
+ var pc = element.firstChild.innerText;
+ var sourceLine = null;
+ if (addressFinder.exec(pc) != null) {
+ var position = findSourcePosition(pc);
+ var line = findSourceLine(position);
+ sourceLine = document.getElementById('source-line-' + line);
+ var sourceLineTop = sourceLine.offsetTop;
+ makeSourcePosVisible(sourceLineTop);
+ }
+ if (selectedSource == sourceLine) return;
+ if (selectedSource != null) {
+ selectedSource.classList.remove('highlight-yellow');
+ selectedSource.classList.add(selectedSourceClass);
+ }
+ if (sourceLine != null) {
+ selectedSourceClass = sourceLine.classList[0];
+ sourceLine.classList.remove(selectedSourceClass);
+ sourceLine.classList.add('highlight-yellow');
+ }
+ selectedSource = sourceLine;
+ }
+
+ function makeContainerPosVisible(container, newTop) {
+ var height = container.offsetHeight;
+ var margin = Math.floor(height / 4);
+ if (newTop < container.scrollTop + margin) {
+ newTop -= margin;
+ if (newTop < 0) newTop = 0;
+ container.scrollTop = newTop;
+ return;
+ }
+ if (newTop > (container.scrollTop + 3 * margin)) {
+ newTop = newTop - 3 * margin;
+ container.scrollTop = newTop;
+ }
+ }
+
+ function makeAsmPosVisible(newTop) {
+ var asmContainer = document.getElementById('asm-container');
+ makeContainerPosVisible(asmContainer, newTop);
+ }
+
+ function makeSourcePosVisible(newTop) {
+ var sourceContainer = document.getElementById('source-container');
+ makeContainerPosVisible(sourceContainer, newTop);
+ }
+
+ function addressClick(element, event) {
+ event.stopPropagation();
+ var asmLineId = 'address-' + element.innerText;
+ var asmLineElement = document.getElementById(asmLineId);
+ if (asmLineElement != null) {
+ var asmLineTop = asmLineElement.parentNode.offsetTop;
+ makeAsmPosVisible(asmLineTop);
+ asmLineElement.classList.add('highlight-flash-blue');
+ window.setTimeout(function() {
+ asmLineElement.classList.remove('highlight-flash-blue');
+ }, 1500);
+ }
+ }
+
+ function prepareAsm(originalSource) {
+ var newSource = "";
+ var lineNumber = 1;
+ var functionProcessLine = function(text, begin, end) {
+ var currentLine = text.substring(begin, end);
+ var matches = instructionFinder.exec(currentLine);
+ var clickHandler = "";
+ if (matches != null) {
+ var restOfLine = matches[2];
+ restOfLine = restOfLine.replace(
+ addressReplacer,
+ '<span class="hover-underline" ' +
+ 'onclick="Sodium.addressClick(this, event);">\$1</span>');
+ currentLine = '<span id="address-' + matches[1] + '" >' +
+ matches[1] + '</span>' + restOfLine;
+ clickHandler = 'onclick=\'Sodium.asmClick(this)\' ';
+ } else if (whiteSpaceLineFinder.exec(currentLine)) {
+ currentLine = "<br>";
+ }
+ newSource += '<pre style=\'margin-bottom: -12px;\' ' + clickHandler + '>' +
+ currentLine + '</pre>';
+ lineNumber++;
+ }
+ processLines(originalSource, originalSource.length, functionProcessLine);
+ return newSource;
+ }
+
+ function findSourcePosition(pcToSearch) {
+ var position = 0;
+ var distance = 0x7FFFFFFF;
+ var pcToSearchOffset = parseInt(pcToSearch);
+ var processOneLine = function(text, begin, end) {
+ var currentLine = text.substring(begin, end);
+ var matches = positionFinder.exec(currentLine);
+ if (matches != null) {
+ var pcOffset = parseInt(matches[1]);
+ if (pcOffset <= pcToSearchOffset) {
+ var dist = pcToSearchOffset - pcOffset;
+ var pos = parseInt(matches[2]);
+ if ((dist < distance) || (dist == distance && pos > position)) {
+ position = pos;
+ distance = dist;
+ }
+ }
+ }
+ }
+ var asmText = getCurrentAsmText();
+ processLines(asmText, asmText.length, processOneLine);
+ var code = getCurrentCodeObject();
+ if (position == 0) return 0;
+ return position - code.firstSourcePosition;
+ }
+
+ function findSourceLine(position) {
+ if (position == 0) return 1;
+ var line = 0;
+ var processOneLine = function(text, begin, end) {
+ if (begin < position) {
+ line++;
+ }
+ }
+ var sourceText = getCurrentSourceText();
+ processLines(sourceText, sourceText.length, processOneLine);
+ return line;
+ }
+
+ function functionChangedHandler() {
+ var functionSelect = document.getElementById('function-selector-id');
+ var source = getCurrentSourceText();
+ var sourceDivElement = document.getElementById('source-text');
+ var code = getCurrentCodeObject();
+ var newHtml = "<pre class=\"prettyprint linenums\" id=\"source-text\">"
+ + 'function ' + code.name + source + "</pre>";
+ sourceDivElement.innerHTML = newHtml;
+ try {
+ // Wrap in try to work when offline.
+ PR.prettyPrint();
+ } catch (e) {
+ }
+ var sourceLineContainer = sourceDivElement.firstChild.firstChild;
+ var lineCount = sourceLineContainer.childElementCount;
+ var current = sourceLineContainer.firstChild;
+ for (var i = 1; i < lineCount; ++i) {
+ current.id = "source-line-" + i;
+ current = current.nextElementSibling;
+ }
+
+ var asm = getCurrentAsmText();
+ document.getElementById('asm-text').innerHTML = prepareAsm(asm);
+ }
+
+ function kindChangedHandler(element) {
+ setKindByIndex(element.selectedIndex);
+ processFileContent();
+ functionChangedHandler();
+ }
+
+ function readLog(evt) {
+ // Retrieve the first (and only!) File from the FileList object
+ var f = evt.target.files[0];
+ if (f) {
+ var r = new FileReader();
+ r.onload = function(e) {
+ var file = evt.target.files[0];
+ currentFunctionKind = "";
+ fileContent = e.target.result;
+ processFileContent();
+ functionChangedHandler();
+ }
+ r.readAsText(f);
+ } else {
+ alert("Failed to load file");
+ }
+ }
+
+ function buildFunctionKindSelector(kindSelectElement) {
+ for (var x = 0; x < kinds.length; ++x) {
+ var optionElement = document.createElement("option");
+ optionElement.value = x;
+ optionElement.text = kinds[x];
+ kindSelectElement.add(optionElement, null);
+ }
+ kindSelectElement.selectedIndex = 1;
+ setKindByIndex(1);
+ }
+
+ return {
+ buildFunctionKindSelector: buildFunctionKindSelector,
+ kindChangedHandler: kindChangedHandler,
+ functionChangedHandler: functionChangedHandler,
+ asmClick: asmClick,
+ addressClick: addressClick,
+ readLog: readLog
+ };
+
+})();
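
Note: the interesting part of sodium.js is how a click on an assembly line maps back to source. findSourcePosition scans the position comments in the disassembly and picks the recorded position whose address is closest at or below the clicked pc, breaking ties toward the larger source position. The same search as a compact Python sketch, with made-up data:

    # (pc offset, source position) pairs as they might be parsed from the
    # "position (n)" comment lines of a code listing.
    positions = [(0x100, 0), (0x110, 12), (0x130, 40)]

    def find_source_position(pc):
        best_pos, best_dist = 0, float("inf")
        for pc_offset, pos in positions:
            if pc_offset <= pc:
                dist = pc - pc_offset
                if dist < best_dist or (dist == best_dist and pos > best_pos):
                    best_pos, best_dist = pos, dist
        return best_pos

    assert find_source_position(0x115) == 12   # nearest entry at or below
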
diff --git a/deps/v8/tools/sodium/styles.css b/deps/v8/tools/sodium/styles.css
new file mode 100755
index 0000000000..4f7d89ee78
--- /dev/null
+++ b/deps/v8/tools/sodium/styles.css
@@ -0,0 +1,70 @@
+#table-header {
+ background-color: rgba(150, 150, 255, 0.4);
+}
+
+#asm-container {
+ background-color: rgba(200, 200, 255, 0.4);
+ position:absolute;
+ overflow:auto;
+ cursor:default;
+ width:50%;
+ height:92%;
+}
+
+#source-container {
+ position:absolute;
+ overflow:auto;
+ width:48%;
+ left:51%;
+ height:92%;
+}
+
+table {
+ border-collapse: collapse;
+}
+
+.hover-underline:hover {
+ text-decoration: underline;
+}
+
+.highlight-flash-blue {
+ -webkit-transition: all 1s ease;
+ background-color: rgba(50, 50, 245, 0.4);
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
+
+
+.highlight-green {
+ background-color: rgba(0, 255, 0, 0.4);
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
+
+.highlight-yellow {
+ background-color: rgba(255, 255, 0, 0.4);
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
+
+.highlight-gray {
+ background-color: rgba(128, 128, 128, 0.4);
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
+
+.highlight-red {
+ background-color: rgba(255, 0, 0, 0.4);
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
diff --git a/deps/v8/tools/test-push-to-trunk.sh b/deps/v8/tools/test-push-to-trunk.sh
new file mode 100755
index 0000000000..6c201e4628
--- /dev/null
+++ b/deps/v8/tools/test-push-to-trunk.sh
@@ -0,0 +1,246 @@
+#!/bin/bash
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Tests the push-to-trunk.sh script. Needs to be run in V8 base dir:
+# ./tools/test-push-to-trunk.sh
+
+# TODO(machenbach): Check automatically if expectations match.
+# TODO(machenbach): Mock out version number retrieval.
+# TODO(machenbach): Allow multiple different test cases.
+# TODO(machenbach): Allow multi line mock output.
+# TODO(machenbach): Represent test expectations/mock output without an array
+# index increment.
+
+########## Stdin for push-to-trunk.sh
+
+# Confirm push to trunk commit ID
+INPUT[0]="Y"
+# Open editor
+INPUT[1]=""
+# Confirm increment version number
+INPUT[2]="Y"
+# Reviewer for V8 CL
+INPUT[3]="reviewer@chromium.org"
+# Enter LGTM for V8 CL
+INPUT[4]="LGTM"
+# Confirm checkout sanity
+INPUT[5]="Y"
+# Manually type in trunk revision
+INPUT[6]="12345"
+# Reviewer for Chromium CL
+INPUT[7]="reviewer@chromium.org"
+
+########## Expected commands and mock output
+
+EXP[0]="git status -s -uno"
+OUT[0]=""
+EXP[1]="git status -s -b -uno"
+OUT[1]="## some_branch"
+EXP[2]="git svn fetch"
+OUT[2]=""
+EXP[3]="git branch"
+OUT[3]="not the temp branch"
+EXP[4]="git checkout -b prepare-push-temporary-branch-created-by-script"
+OUT[4]=""
+EXP[5]="git branch"
+OUT[5]="not the branch"
+EXP[6]="git branch"
+OUT[6]="not the trunk branch"
+EXP[7]="git checkout -b prepare-push svn/bleeding_edge"
+OUT[7]=""
+EXP[8]="git log -1 --format=%H ChangeLog"
+OUT[8]="hash1"
+EXP[9]="git log -1 hash1"
+OUT[9]=""
+EXP[10]="git log hash1..HEAD --format=%H"
+OUT[10]="hash2"
+EXP[11]="git log -1 hash2 --format=\"%w(80,8,8)%s\""
+OUT[11]="Log line..."
+EXP[12]="git log -1 hash2 --format=\"%B\""
+OUT[12]="BUG=6789"
+EXP[13]="git log -1 hash2 --format=\"%w(80,8,8)(%an)\""
+OUT[13]=" (author@chromium.org)"
+EXP[14]="git commit -a -m \"Prepare push to trunk. Now working on version 3.4.5.\""
+OUT[14]=""
+EXP[15]="git cl upload -r reviewer@chromium.org --send-mail"
+OUT[15]=""
+EXP[16]="git cl dcommit"
+OUT[16]=""
+EXP[17]="git svn fetch"
+OUT[17]=""
+EXP[18]="git checkout svn/bleeding_edge"
+OUT[18]=""
+EXP[19]="git log -1 --format=%H --grep=Prepare push to trunk. Now working on version 3.4.5."
+OUT[19]="hash3"
+EXP[20]="git diff svn/trunk"
+OUT[20]="patch1"
+EXP[21]="git checkout -b trunk-push svn/trunk"
+OUT[21]=""
+EXP[22]="git apply --index --reject /tmp/v8-push-to-trunk-tempfile-patch"
+OUT[22]=""
+EXP[23]="git add src/version.cc"
+OUT[23]=""
+EXP[24]="git commit -F /tmp/v8-push-to-trunk-tempfile-commitmsg"
+OUT[24]=""
+EXP[25]="git svn dcommit"
+OUT[25]="r1234"
+EXP[26]="git svn tag 3.4.5 -m \"Tagging version 3.4.5\""
+OUT[26]=""
+EXP[27]="git status -s -uno"
+OUT[27]=""
+EXP[28]="git checkout master"
+OUT[28]=""
+EXP[29]="git pull"
+OUT[29]=""
+EXP[30]="git checkout -b v8-roll-12345"
+OUT[30]=""
+EXP[31]="git commit -am Update V8 to version 3.4.5."
+OUT[31]=""
+EXP[32]="git cl upload --send-mail"
+OUT[32]=""
+EXP[33]="git checkout -f some_branch"
+OUT[33]=""
+EXP[34]="git branch -D prepare-push-temporary-branch-created-by-script"
+OUT[34]=""
+EXP[35]="git branch -D prepare-push"
+OUT[35]=""
+EXP[36]="git branch -D trunk-push"
+OUT[36]=""
+
+########## Global temp files for test input/output
+
+export TEST_OUTPUT=$(mktemp)
+export INDEX=$(mktemp)
+export MOCK_OUTPUT=$(mktemp)
+export EXPECTED_COMMANDS=$(mktemp)
+
+########## Command index
+
+inc_index() {
+ local I="$(command cat $INDEX)"
+ let "I+=1"
+ echo "$I" > $INDEX
+ echo $I
+}
+
+echo "-1" > $INDEX
+export -f inc_index
+
+########## Mock output accessor
+
+get_mock_output() {
+ local I=$1
+ let "I+=1"
+ command sed "${I}q;d" $MOCK_OUTPUT
+}
+
+export -f get_mock_output
+
+for E in "${OUT[@]}"; do
+ echo $E
+done > $MOCK_OUTPUT
+
+########## Expected commands accessor
+
+get_expected_command() {
+ local I=$1
+ let "I+=1"
+ command sed "${I}q;d" $EXPECTED_COMMANDS
+}
+
+export -f get_expected_command
+
+for E in "${EXP[@]}"; do
+ echo $E
+done > $EXPECTED_COMMANDS
+
+########## Mock commands
+
+git() {
+ # All calls to git are mocked out. Expected calls and mock output are stored
+ # in the EXP/OUT arrays above.
+ local I=$(inc_index)
+ local OUT=$(get_mock_output $I)
+ local EXP=$(get_expected_command $I)
+ echo "#############################" >> $TEST_OUTPUT
+ echo "Com. Index: $I" >> $TEST_OUTPUT
+ echo "Expected: ${EXP}" >> $TEST_OUTPUT
+ echo "Actual: git $@" >> $TEST_OUTPUT
+ echo "Mock Output: ${OUT}" >> $TEST_OUTPUT
+ echo "${OUT}"
+}
+
+mv() {
+ echo "#############################" >> $TEST_OUTPUT
+ echo "mv $@" >> $TEST_OUTPUT
+}
+
+sed() {
+ # Only calls to sed * -i * are mocked out.
+ echo "#############################" >> $TEST_OUTPUT
+ local arr=$@
+ if [[ "${arr[@]}" =~ "-i" || "${arr[${#arr[@]}-1]}" == "-i" ]]; then
+ echo "sed $@" >> $TEST_OUTPUT
+ else
+ echo "sed $@" >> $TEST_OUTPUT
+ command sed "$@"
+ fi
+}
+
+editor() {
+ echo "#############################" >> $TEST_OUTPUT
+ echo "editor $@" >> $TEST_OUTPUT
+}
+
+cd() {
+ echo "#############################" >> $TEST_OUTPUT
+ echo "cd $@" >> $TEST_OUTPUT
+}
+
+export -f git
+export -f mv
+export -f sed
+export -f cd
+export -f editor
+export EDITOR=editor
+
+########## Invoke script with test stdin
+
+for i in "${INPUT[@]}"; do
+ echo $i
+done | tools/push-to-trunk.sh -c "path/to/chromium"
+
+echo "Collected output:"
+command cat $TEST_OUTPUT
+
+########## Clean up
+
+rm -rf $TEST_OUTPUT
+rm -rf $INDEX
+rm -rf $MOCK_OUTPUT
+rm -rf $EXPECTED_COMMANDS
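
Note: the test script mocks out git (plus mv, sed, cd and the editor) with exported shell functions; a shared counter file indexes into the EXP/OUT arrays so that every call is matched against the next expected command and answered with canned output. The same pattern in a few lines of Python, for readers less fluent in bash:

    EXPECTED = ["git status -s -uno", "git svn fetch"]   # per-call expectations
    CANNED = ["", ""]                                    # matching mock output
    state = {"i": -1}

    def fake_git(*args):
        state["i"] += 1
        actual = "git " + " ".join(args)
        assert actual == EXPECTED[state["i"]], (actual, EXPECTED[state["i"]])
        return CANNED[state["i"]]

    fake_git("status", "-s", "-uno")   # ok: matches EXPECTED[0]
    fake_git("svn", "fetch")           # ok: matches EXPECTED[1]
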
diff --git a/deps/v8/tools/testrunner/README b/deps/v8/tools/testrunner/README
index 8f0c01f52a..0771ef9dc2 100644
--- a/deps/v8/tools/testrunner/README
+++ b/deps/v8/tools/testrunner/README
@@ -87,12 +87,6 @@ This section is written from the point of view of the tools/ directory.
Implementation needed to run tests locally. Used by run-tests.py. Inspired by
(and partly copied verbatim from) the original test.py script.
-./testrunner/local/old_statusfile.py:
- Provides functionality to read an old-style <testsuite>.status file and
- convert it to new-style syntax. This can be removed once the new-style
- syntax becomes authoritative (and old-style syntax is no longer supported).
- ./status-file-converter.py provides a stand-alone interface to this.
-
./testrunner/objects/*:
A bunch of data container classes, used by the scripts in the various other
directories; serializable for transmission over the network.
diff --git a/deps/v8/tools/testrunner/local/old_statusfile.py b/deps/v8/tools/testrunner/local/old_statusfile.py
deleted file mode 100644
index d634e3ec95..0000000000
--- a/deps/v8/tools/testrunner/local/old_statusfile.py
+++ /dev/null
@@ -1,462 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import cStringIO
-import re
-
-# These outcomes can occur in a TestCase's outcomes list:
-SKIP = 'SKIP'
-FAIL = 'FAIL'
-PASS = 'PASS'
-OKAY = 'OKAY'
-TIMEOUT = 'TIMEOUT'
-CRASH = 'CRASH'
-SLOW = 'SLOW'
-FLAKY = 'FLAKY'
-# These are just for the status files and are mapped below in DEFS:
-FAIL_OK = 'FAIL_OK'
-PASS_OR_FAIL = 'PASS_OR_FAIL'
-
-KEYWORDS = {SKIP: SKIP,
- FAIL: FAIL,
- PASS: PASS,
- OKAY: OKAY,
- TIMEOUT: TIMEOUT,
- CRASH: CRASH,
- SLOW: SLOW,
- FLAKY: FLAKY,
- FAIL_OK: FAIL_OK,
- PASS_OR_FAIL: PASS_OR_FAIL}
-
-class Expression(object):
- pass
-
-
-class Constant(Expression):
-
- def __init__(self, value):
- self.value = value
-
- def Evaluate(self, env, defs):
- return self.value
-
-
-class Variable(Expression):
-
- def __init__(self, name):
- self.name = name
-
- def GetOutcomes(self, env, defs):
- if self.name in env: return set([env[self.name]])
- else: return set([])
-
- def Evaluate(self, env, defs):
- return env[self.name]
-
- def __str__(self):
- return self.name
-
- def string(self, logical):
- return self.__str__()
-
-
-class Outcome(Expression):
-
- def __init__(self, name):
- self.name = name
-
- def GetOutcomes(self, env, defs):
- if self.name in defs:
- return defs[self.name].GetOutcomes(env, defs)
- else:
- return set([self.name])
-
- def __str__(self):
- if self.name in KEYWORDS:
- return "%s" % KEYWORDS[self.name]
- return "'%s'" % self.name
-
- def string(self, logical):
- if logical:
- return "%s" % self.name
- return self.__str__()
-
-
-class Operation(Expression):
-
- def __init__(self, left, op, right):
- self.left = left
- self.op = op
- self.right = right
-
- def Evaluate(self, env, defs):
- if self.op == '||' or self.op == ',':
- return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
- elif self.op == 'if':
- return False
- elif self.op == '==':
- return not self.left.GetOutcomes(env, defs).isdisjoint(self.right.GetOutcomes(env, defs))
- elif self.op == '!=':
- return self.left.GetOutcomes(env, defs).isdisjoint(self.right.GetOutcomes(env, defs))
- else:
- assert self.op == '&&'
- return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
-
- def GetOutcomes(self, env, defs):
- if self.op == '||' or self.op == ',':
- return self.left.GetOutcomes(env, defs) | self.right.GetOutcomes(env, defs)
- elif self.op == 'if':
- if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
- else: return set([])
- else:
- assert self.op == '&&'
- return self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)
-
- def __str__(self):
- return self.string(False)
-
- def string(self, logical=False):
- if self.op == 'if':
- return "['%s', %s]" % (self.right.string(True), self.left.string(logical))
- elif self.op == "||" or self.op == ",":
- if logical:
- return "%s or %s" % (self.left.string(True), self.right.string(True))
- else:
- return "%s, %s" % (self.left, self.right)
- elif self.op == "&&":
- return "%s and %s" % (self.left.string(True), self.right.string(True))
- return "%s %s %s" % (self.left.string(logical), self.op,
- self.right.string(logical))
-
-
-def IsAlpha(string):
- for char in string:
- if not (char.isalpha() or char.isdigit() or char == '_'):
- return False
- return True
-
-
-class Tokenizer(object):
- """A simple string tokenizer that chops expressions into variables,
- parens and operators"""
-
- def __init__(self, expr):
- self.index = 0
- self.expr = expr
- self.length = len(expr)
- self.tokens = None
-
- def Current(self, length=1):
- if not self.HasMore(length): return ""
- return self.expr[self.index:self.index + length]
-
- def HasMore(self, length=1):
- return self.index < self.length + (length - 1)
-
- def Advance(self, count=1):
- self.index = self.index + count
-
- def AddToken(self, token):
- self.tokens.append(token)
-
- def SkipSpaces(self):
- while self.HasMore() and self.Current().isspace():
- self.Advance()
-
- def Tokenize(self):
- self.tokens = [ ]
- while self.HasMore():
- self.SkipSpaces()
- if not self.HasMore():
- return None
- if self.Current() == '(':
- self.AddToken('(')
- self.Advance()
- elif self.Current() == ')':
- self.AddToken(')')
- self.Advance()
- elif self.Current() == '$':
- self.AddToken('$')
- self.Advance()
- elif self.Current() == ',':
- self.AddToken(',')
- self.Advance()
- elif IsAlpha(self.Current()):
- buf = ""
- while self.HasMore() and IsAlpha(self.Current()):
- buf += self.Current()
- self.Advance()
- self.AddToken(buf)
- elif self.Current(2) == '&&':
- self.AddToken('&&')
- self.Advance(2)
- elif self.Current(2) == '||':
- self.AddToken('||')
- self.Advance(2)
- elif self.Current(2) == '==':
- self.AddToken('==')
- self.Advance(2)
- elif self.Current(2) == '!=':
- self.AddToken('!=')
- self.Advance(2)
- else:
- return None
- return self.tokens
-
-
-class Scanner(object):
- """A simple scanner that can serve out tokens from a given list"""
-
- def __init__(self, tokens):
- self.tokens = tokens
- self.length = len(tokens)
- self.index = 0
-
- def HasMore(self):
- return self.index < self.length
-
- def Current(self):
- return self.tokens[self.index]
-
- def Advance(self):
- self.index = self.index + 1
-
-
-def ParseAtomicExpression(scan):
- if scan.Current() == "true":
- scan.Advance()
- return Constant(True)
- elif scan.Current() == "false":
- scan.Advance()
- return Constant(False)
- elif IsAlpha(scan.Current()):
- name = scan.Current()
- scan.Advance()
- return Outcome(name)
- elif scan.Current() == '$':
- scan.Advance()
- if not IsAlpha(scan.Current()):
- return None
- name = scan.Current()
- scan.Advance()
- return Variable(name.lower())
- elif scan.Current() == '(':
- scan.Advance()
- result = ParseLogicalExpression(scan)
- if (not result) or (scan.Current() != ')'):
- return None
- scan.Advance()
- return result
- else:
- return None
-
-
-BINARIES = ['==', '!=']
-def ParseOperatorExpression(scan):
- left = ParseAtomicExpression(scan)
- if not left: return None
- while scan.HasMore() and (scan.Current() in BINARIES):
- op = scan.Current()
- scan.Advance()
- right = ParseOperatorExpression(scan)
- if not right:
- return None
- left = Operation(left, op, right)
- return left
-
-
-def ParseConditionalExpression(scan):
- left = ParseOperatorExpression(scan)
- if not left: return None
- while scan.HasMore() and (scan.Current() == 'if'):
- scan.Advance()
- right = ParseOperatorExpression(scan)
- if not right:
- return None
- left = Operation(left, 'if', right)
- return left
-
-
-LOGICALS = ["&&", "||", ","]
-def ParseLogicalExpression(scan):
- left = ParseConditionalExpression(scan)
- if not left: return None
- while scan.HasMore() and (scan.Current() in LOGICALS):
- op = scan.Current()
- scan.Advance()
- right = ParseConditionalExpression(scan)
- if not right:
- return None
- left = Operation(left, op, right)
- return left
-
-
-def ParseCondition(expr):
- """Parses a logical expression into an Expression object"""
- tokens = Tokenizer(expr).Tokenize()
- if not tokens:
- print "Malformed expression: '%s'" % expr
- return None
- scan = Scanner(tokens)
- ast = ParseLogicalExpression(scan)
- if not ast:
- print "Malformed expression: '%s'" % expr
- return None
- if scan.HasMore():
- print "Malformed expression: '%s'" % expr
- return None
- return ast
-
-
-class Section(object):
- """A section of the configuration file. Sections are enabled or
- disabled prior to running the tests, based on their conditions"""
-
- def __init__(self, condition):
- self.condition = condition
- self.rules = [ ]
-
- def AddRule(self, rule):
- self.rules.append(rule)
-
-
-class Rule(object):
- """A single rule that specifies the expected outcome for a single
- test."""
-
- def __init__(self, raw_path, path, value):
- self.raw_path = raw_path
- self.path = path
- self.value = value
-
- def GetOutcomes(self, env, defs):
- return self.value.GetOutcomes(env, defs)
-
- def Contains(self, path):
- if len(self.path) > len(path):
- return False
- for i in xrange(len(self.path)):
- if not self.path[i].match(path[i]):
- return False
- return True
-
-
-HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
-RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
-DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
-PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
-
-
-class ConvertNotation(object):
- def __init__(self, path):
- self.path = path
- self.indent = ""
- self.comment = []
- self.init = False
- self.section = False
- self.out = cStringIO.StringIO()
-
- def OpenGlobal(self):
- if self.init: return
- self.WriteComment()
- print >> self.out, "["
- self.init = True
-
- def CloseGlobal(self):
- if not self.init: self.OpenGlobal()
- print >> self.out, "]"
- self.init = False
-
- def OpenSection(self, condition="ALWAYS"):
- if self.section: return
- self.OpenGlobal()
- if type(condition) != str:
- condition = "'%s'" % condition.string(True)
- print >> self.out, "%s[%s, {" % (self.indent, condition)
- self.indent += " " * 2
- self.section = condition
-
- def CloseSection(self):
- if not self.section: return
- self.indent = self.indent[:-2]
- print >> self.out, "%s}], # %s" % (self.indent, self.section)
- self.section = False
-
- def WriteComment(self):
- if not self.comment: return
- for c in self.comment:
- if len(c.strip()) == 0:
- print >> self.out, ""
- else:
- print >> self.out, "%s%s" % (self.indent, c),
- self.comment = []
-
- def GetOutput(self):
- with open(self.path) as f:
- for line in f:
- if line[0] == '#':
- self.comment += [line]
- continue
- if len(line.strip()) == 0:
- self.comment += [line]
- continue
- header_match = HEADER_PATTERN.match(line)
- if header_match:
- condition = ParseCondition(header_match.group(1).strip())
- self.CloseSection()
- self.WriteComment()
- self.OpenSection(condition)
- continue
- rule_match = RULE_PATTERN.match(line)
- if rule_match:
- self.OpenSection()
- self.WriteComment()
- path = rule_match.group(1).strip()
- value_str = rule_match.group(2).strip()
- comment = ""
- if '#' in value_str:
- pos = value_str.find('#')
- comment = " %s" % value_str[pos:].strip()
- value_str = value_str[:pos].strip()
- value = ParseCondition(value_str)
- print >> self.out, ("%s'%s': [%s],%s" %
- (self.indent, path, value, comment))
- continue
- def_match = DEF_PATTERN.match(line)
- if def_match:
- # Custom definitions are deprecated.
- continue
- prefix_match = PREFIX_PATTERN.match(line)
- if prefix_match:
- continue
- print "Malformed line: '%s'." % line
- self.CloseSection()
- self.CloseGlobal()
- result = self.out.getvalue()
- self.out.close()
- return result
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 1d30fe3d3c..da0c797d0a 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -26,14 +26,6 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# These imports are required for the on-demand conversion from
-# old to new status file format.
-from os.path import exists
-from os.path import getmtime
-
-from . import old_statusfile
-
-
# These outcomes can occur in a TestCase's outcomes list:
SKIP = "SKIP"
FAIL = "FAIL"
@@ -43,6 +35,7 @@ TIMEOUT = "TIMEOUT"
CRASH = "CRASH"
SLOW = "SLOW"
FLAKY = "FLAKY"
+NO_VARIANTS = "NO_VARIANTS"
# These are just for the status files and are mapped below in DEFS:
FAIL_OK = "FAIL_OK"
PASS_OR_FAIL = "PASS_OR_FAIL"
@@ -51,7 +44,7 @@ ALWAYS = "ALWAYS"
KEYWORDS = {}
for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
- PASS_OR_FAIL, ALWAYS]:
+ NO_VARIANTS, PASS_OR_FAIL, ALWAYS]:
KEYWORDS[key] = key
DEFS = {FAIL_OK: [FAIL, OKAY],
@@ -60,7 +53,8 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_ia32", "arm", "ia32",
- "mipsel", "x64", "nacl_ia32", "nacl_x64"]:
+ "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos", "windows",
+ "linux"]:
VARIABLES[var] = var
@@ -68,6 +62,10 @@ def DoSkip(outcomes):
return SKIP in outcomes or SLOW in outcomes
+def OnlyStandardVariant(outcomes):
+ return NO_VARIANTS in outcomes
+
+
def IsFlaky(outcomes):
return FLAKY in outcomes
@@ -116,18 +114,6 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables):
def ReadStatusFile(path, variables):
- # As long as the old-format .status files are authoritative, just
- # create the converted version on demand and cache it to speed up
- # subsequent runs.
- if path.endswith(".status"):
- newpath = path + "2"
- if not exists(newpath) or getmtime(newpath) < getmtime(path):
- print "Converting status file."
- converted = old_statusfile.ConvertNotation(path).GetOutput()
- with open(newpath, 'w') as f:
- f.write(converted)
- path = newpath
-
with open(path) as f:
global KEYWORDS
contents = eval(f.read(), KEYWORDS)
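Since ReadStatusFile() simply eval()s the file body with KEYWORDS as the globals dictionary, bare identifiers in a status file resolve to their string values; the condition strings are evaluated separately against VARIABLES, which is what the new macos/windows/linux entries enable. A minimal, hypothetical illustration of the keyword mechanism:

  # Bare names such as ALWAYS and NO_VARIANTS are looked up in the
  # globals dict passed to eval(), yielding plain strings.
  KEYWORDS = {"ALWAYS": "ALWAYS", "NO_VARIANTS": "NO_VARIANTS"}
  contents = eval("[[ALWAYS, {'my-test': [NO_VARIANTS]}]]", KEYWORDS)
  # contents == [['ALWAYS', {'my-test': ['NO_VARIANTS']}]]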
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index b0372e7f73..8517ce9f49 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -74,8 +74,10 @@ class TestSuite(object):
def ListTests(self, context):
raise NotImplementedError
- def VariantFlags(self):
- return None
+ def VariantFlags(self, testcase, default_flags):
+ if testcase.outcomes and statusfile.OnlyStandardVariant(testcase.outcomes):
+ return [[]]
+ return default_flags
def DownloadData(self):
pass
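The two-argument VariantFlags() is what gives NO_VARIANTS its effect: a test whose outcomes include it runs only the single flag-less variant [[]] instead of every flag combination. A hedged sketch, where suite, testcase and the flag sets are illustrative stand-ins:

  default_flags = [[], ["--stress-opt"], ["--nocrankshaft"]]
  flags = suite.VariantFlags(testcase, default_flags)
  # NO_VARIANTS in testcase.outcomes -> flags == [[]] (one plain run)
  # otherwise                        -> flags == default_flags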
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index b7caa121f3..61ee7dac67 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -65,7 +65,7 @@ def GuessOS():
elif system == 'Windows' or system == 'Microsoft':
# On Windows Vista platform.system() can return 'Microsoft' with some
# versions of Python, see http://bugs.python.org/issue1082
- return 'win32'
+ return 'windows'
elif system == 'FreeBSD':
return 'freebsd'
elif system == 'OpenBSD':
@@ -105,4 +105,4 @@ def GuessWordsize():
def IsWindows():
- return GuessOS() == 'win32'
+ return GuessOS() == 'windows'
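Returning 'windows' instead of 'win32' keeps GuessOS() consistent with the status-file variables added above, so a section condition like system == windows compares against exactly the token the runner detects; IsWindows() is updated in lockstep. A hypothetical check on a Windows host:

  assert GuessOS() == 'windows'
  assert IsWindows()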
diff --git a/deps/v8/tools/testrunner/objects/context.py b/deps/v8/tools/testrunner/objects/context.py
index 3ea215a708..1f525b76b3 100644
--- a/deps/v8/tools/testrunner/objects/context.py
+++ b/deps/v8/tools/testrunner/objects/context.py
@@ -28,7 +28,7 @@
class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
- isolates, command_prefix, extra_flags):
+ isolates, command_prefix, extra_flags, noi18n):
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
@@ -38,13 +38,14 @@ class Context():
self.isolates = isolates
self.command_prefix = command_prefix
self.extra_flags = extra_flags
+ self.noi18n = noi18n
def Pack(self):
return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
- self.command_prefix, self.extra_flags]
+ self.command_prefix, self.extra_flags, self.noi18n]
@staticmethod
def Unpack(packed):
# For the order of the fields, refer to Pack() above.
return Context(packed[0], packed[1], None, packed[2], False,
- packed[3], packed[4], packed[5], packed[6])
+ packed[3], packed[4], packed[5], packed[6], packed[7])
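Because worker processes rebuild the context from its packed form, the new noi18n flag has to ride along in Pack() and be restored positionally in Unpack(). A minimal round-trip sketch with made-up argument values:

  ctx = Context("ia32", "release", "out/ia32.release", [], False, 60,
                False, [], [], True)   # noi18n=True is the new final argument
  clone = Context.Unpack(ctx.Pack())   # shell_dir and verbose are not packed
  assert clone.noi18n is True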
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 1f72c37236..06141c2f96 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -71,53 +71,54 @@ INSTANCE_TYPES = {
144: "EXTERNAL_FLOAT_ARRAY_TYPE",
145: "EXTERNAL_DOUBLE_ARRAY_TYPE",
146: "EXTERNAL_PIXEL_ARRAY_TYPE",
- 148: "FILLER_TYPE",
- 149: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
- 150: "DECLARED_ACCESSOR_INFO_TYPE",
- 151: "EXECUTABLE_ACCESSOR_INFO_TYPE",
- 152: "ACCESSOR_PAIR_TYPE",
- 153: "ACCESS_CHECK_INFO_TYPE",
- 154: "INTERCEPTOR_INFO_TYPE",
- 155: "CALL_HANDLER_INFO_TYPE",
- 156: "FUNCTION_TEMPLATE_INFO_TYPE",
- 157: "OBJECT_TEMPLATE_INFO_TYPE",
- 158: "SIGNATURE_INFO_TYPE",
- 159: "TYPE_SWITCH_INFO_TYPE",
- 161: "ALLOCATION_MEMENTO_TYPE",
- 160: "ALLOCATION_SITE_TYPE",
- 162: "SCRIPT_TYPE",
- 163: "CODE_CACHE_TYPE",
- 164: "POLYMORPHIC_CODE_CACHE_TYPE",
- 165: "TYPE_FEEDBACK_INFO_TYPE",
- 166: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 167: "BOX_TYPE",
- 170: "FIXED_ARRAY_TYPE",
+ 149: "FILLER_TYPE",
+ 150: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
+ 151: "DECLARED_ACCESSOR_INFO_TYPE",
+ 152: "EXECUTABLE_ACCESSOR_INFO_TYPE",
+ 153: "ACCESSOR_PAIR_TYPE",
+ 154: "ACCESS_CHECK_INFO_TYPE",
+ 155: "INTERCEPTOR_INFO_TYPE",
+ 156: "CALL_HANDLER_INFO_TYPE",
+ 157: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 158: "OBJECT_TEMPLATE_INFO_TYPE",
+ 159: "SIGNATURE_INFO_TYPE",
+ 160: "TYPE_SWITCH_INFO_TYPE",
+ 162: "ALLOCATION_MEMENTO_TYPE",
+ 161: "ALLOCATION_SITE_TYPE",
+ 163: "SCRIPT_TYPE",
+ 164: "CODE_CACHE_TYPE",
+ 165: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 166: "TYPE_FEEDBACK_INFO_TYPE",
+ 167: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 168: "BOX_TYPE",
+ 171: "FIXED_ARRAY_TYPE",
147: "FIXED_DOUBLE_ARRAY_TYPE",
- 171: "SHARED_FUNCTION_INFO_TYPE",
- 172: "JS_MESSAGE_OBJECT_TYPE",
- 175: "JS_VALUE_TYPE",
- 176: "JS_DATE_TYPE",
- 177: "JS_OBJECT_TYPE",
- 178: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 179: "JS_GENERATOR_OBJECT_TYPE",
- 180: "JS_MODULE_TYPE",
- 181: "JS_GLOBAL_OBJECT_TYPE",
- 182: "JS_BUILTINS_OBJECT_TYPE",
- 183: "JS_GLOBAL_PROXY_TYPE",
- 184: "JS_ARRAY_TYPE",
- 185: "JS_ARRAY_BUFFER_TYPE",
- 186: "JS_TYPED_ARRAY_TYPE",
- 187: "JS_DATA_VIEW_TYPE",
- 174: "JS_PROXY_TYPE",
- 188: "JS_SET_TYPE",
- 189: "JS_MAP_TYPE",
- 190: "JS_WEAK_MAP_TYPE",
- 191: "JS_WEAK_SET_TYPE",
- 192: "JS_REGEXP_TYPE",
- 193: "JS_FUNCTION_TYPE",
- 173: "JS_FUNCTION_PROXY_TYPE",
- 168: "DEBUG_INFO_TYPE",
- 169: "BREAK_POINT_INFO_TYPE",
+ 148: "CONSTANT_POOL_ARRAY_TYPE",
+ 172: "SHARED_FUNCTION_INFO_TYPE",
+ 173: "JS_MESSAGE_OBJECT_TYPE",
+ 176: "JS_VALUE_TYPE",
+ 177: "JS_DATE_TYPE",
+ 178: "JS_OBJECT_TYPE",
+ 179: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 180: "JS_GENERATOR_OBJECT_TYPE",
+ 181: "JS_MODULE_TYPE",
+ 182: "JS_GLOBAL_OBJECT_TYPE",
+ 183: "JS_BUILTINS_OBJECT_TYPE",
+ 184: "JS_GLOBAL_PROXY_TYPE",
+ 185: "JS_ARRAY_TYPE",
+ 186: "JS_ARRAY_BUFFER_TYPE",
+ 187: "JS_TYPED_ARRAY_TYPE",
+ 188: "JS_DATA_VIEW_TYPE",
+ 175: "JS_PROXY_TYPE",
+ 189: "JS_SET_TYPE",
+ 190: "JS_MAP_TYPE",
+ 191: "JS_WEAK_MAP_TYPE",
+ 192: "JS_WEAK_SET_TYPE",
+ 193: "JS_REGEXP_TYPE",
+ 194: "JS_FUNCTION_TYPE",
+ 174: "JS_FUNCTION_PROXY_TYPE",
+ 169: "DEBUG_INFO_TYPE",
+ 170: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
@@ -126,85 +127,86 @@ KNOWN_MAPS = {
0x080a9: (129, "MetaMap"),
0x080d1: (131, "OddballMap"),
0x080f9: (4, "AsciiInternalizedStringMap"),
- 0x08121: (170, "FixedArrayMap"),
+ 0x08121: (171, "FixedArrayMap"),
0x08149: (134, "HeapNumberMap"),
0x08171: (137, "FreeSpaceMap"),
- 0x08199: (148, "OnePointerFillerMap"),
- 0x081c1: (148, "TwoPointerFillerMap"),
+ 0x08199: (149, "OnePointerFillerMap"),
+ 0x081c1: (149, "TwoPointerFillerMap"),
0x081e9: (132, "CellMap"),
0x08211: (133, "GlobalPropertyCellMap"),
- 0x08239: (171, "SharedFunctionInfoMap"),
- 0x08261: (170, "NativeContextMap"),
+ 0x08239: (172, "SharedFunctionInfoMap"),
+ 0x08261: (171, "NativeContextMap"),
0x08289: (130, "CodeMap"),
- 0x082b1: (170, "ScopeInfoMap"),
- 0x082d9: (170, "FixedCOWArrayMap"),
+ 0x082b1: (171, "ScopeInfoMap"),
+ 0x082d9: (171, "FixedCOWArrayMap"),
0x08301: (147, "FixedDoubleArrayMap"),
- 0x08329: (170, "HashTableMap"),
- 0x08351: (128, "SymbolMap"),
- 0x08379: (64, "StringMap"),
- 0x083a1: (68, "AsciiStringMap"),
- 0x083c9: (65, "ConsStringMap"),
- 0x083f1: (69, "ConsAsciiStringMap"),
- 0x08419: (67, "SlicedStringMap"),
- 0x08441: (71, "SlicedAsciiStringMap"),
- 0x08469: (66, "ExternalStringMap"),
- 0x08491: (74, "ExternalStringWithOneByteDataMap"),
- 0x084b9: (70, "ExternalAsciiStringMap"),
- 0x084e1: (82, "ShortExternalStringMap"),
- 0x08509: (90, "ShortExternalStringWithOneByteDataMap"),
- 0x08531: (0, "InternalizedStringMap"),
- 0x08559: (1, "ConsInternalizedStringMap"),
- 0x08581: (5, "ConsAsciiInternalizedStringMap"),
- 0x085a9: (2, "ExternalInternalizedStringMap"),
- 0x085d1: (10, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x085f9: (6, "ExternalAsciiInternalizedStringMap"),
- 0x08621: (18, "ShortExternalInternalizedStringMap"),
- 0x08649: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x08671: (22, "ShortExternalAsciiInternalizedStringMap"),
- 0x08699: (86, "ShortExternalAsciiStringMap"),
- 0x086c1: (64, "UndetectableStringMap"),
- 0x086e9: (68, "UndetectableAsciiStringMap"),
- 0x08711: (138, "ExternalByteArrayMap"),
- 0x08739: (139, "ExternalUnsignedByteArrayMap"),
- 0x08761: (140, "ExternalShortArrayMap"),
- 0x08789: (141, "ExternalUnsignedShortArrayMap"),
- 0x087b1: (142, "ExternalIntArrayMap"),
- 0x087d9: (143, "ExternalUnsignedIntArrayMap"),
- 0x08801: (144, "ExternalFloatArrayMap"),
- 0x08829: (145, "ExternalDoubleArrayMap"),
- 0x08851: (146, "ExternalPixelArrayMap"),
- 0x08879: (170, "NonStrictArgumentsElementsMap"),
- 0x088a1: (170, "FunctionContextMap"),
- 0x088c9: (170, "CatchContextMap"),
- 0x088f1: (170, "WithContextMap"),
- 0x08919: (170, "BlockContextMap"),
- 0x08941: (170, "ModuleContextMap"),
- 0x08969: (170, "GlobalContextMap"),
- 0x08991: (172, "JSMessageObjectMap"),
- 0x089b9: (135, "ForeignMap"),
- 0x089e1: (177, "NeanderMap"),
- 0x08a09: (161, "AllocationMementoMap"),
- 0x08a31: (160, "AllocationSiteMap"),
- 0x08a59: (164, "PolymorphicCodeCacheMap"),
- 0x08a81: (162, "ScriptMap"),
- 0x08ad1: (177, "ExternalMap"),
- 0x08af9: (167, "BoxMap"),
- 0x08b21: (149, "DeclaredAccessorDescriptorMap"),
- 0x08b49: (150, "DeclaredAccessorInfoMap"),
- 0x08b71: (151, "ExecutableAccessorInfoMap"),
- 0x08b99: (152, "AccessorPairMap"),
- 0x08bc1: (153, "AccessCheckInfoMap"),
- 0x08be9: (154, "InterceptorInfoMap"),
- 0x08c11: (155, "CallHandlerInfoMap"),
- 0x08c39: (156, "FunctionTemplateInfoMap"),
- 0x08c61: (157, "ObjectTemplateInfoMap"),
- 0x08c89: (158, "SignatureInfoMap"),
- 0x08cb1: (159, "TypeSwitchInfoMap"),
- 0x08cd9: (163, "CodeCacheMap"),
- 0x08d01: (165, "TypeFeedbackInfoMap"),
- 0x08d29: (166, "AliasedArgumentsEntryMap"),
- 0x08d51: (168, "DebugInfoMap"),
- 0x08d79: (169, "BreakPointInfoMap"),
+ 0x08329: (148, "ConstantPoolArrayMap"),
+ 0x08351: (171, "HashTableMap"),
+ 0x08379: (128, "SymbolMap"),
+ 0x083a1: (64, "StringMap"),
+ 0x083c9: (68, "AsciiStringMap"),
+ 0x083f1: (65, "ConsStringMap"),
+ 0x08419: (69, "ConsAsciiStringMap"),
+ 0x08441: (67, "SlicedStringMap"),
+ 0x08469: (71, "SlicedAsciiStringMap"),
+ 0x08491: (66, "ExternalStringMap"),
+ 0x084b9: (74, "ExternalStringWithOneByteDataMap"),
+ 0x084e1: (70, "ExternalAsciiStringMap"),
+ 0x08509: (82, "ShortExternalStringMap"),
+ 0x08531: (90, "ShortExternalStringWithOneByteDataMap"),
+ 0x08559: (0, "InternalizedStringMap"),
+ 0x08581: (1, "ConsInternalizedStringMap"),
+ 0x085a9: (5, "ConsAsciiInternalizedStringMap"),
+ 0x085d1: (2, "ExternalInternalizedStringMap"),
+ 0x085f9: (10, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x08621: (6, "ExternalAsciiInternalizedStringMap"),
+ 0x08649: (18, "ShortExternalInternalizedStringMap"),
+ 0x08671: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x08699: (22, "ShortExternalAsciiInternalizedStringMap"),
+ 0x086c1: (86, "ShortExternalAsciiStringMap"),
+ 0x086e9: (64, "UndetectableStringMap"),
+ 0x08711: (68, "UndetectableAsciiStringMap"),
+ 0x08739: (138, "ExternalByteArrayMap"),
+ 0x08761: (139, "ExternalUnsignedByteArrayMap"),
+ 0x08789: (140, "ExternalShortArrayMap"),
+ 0x087b1: (141, "ExternalUnsignedShortArrayMap"),
+ 0x087d9: (142, "ExternalIntArrayMap"),
+ 0x08801: (143, "ExternalUnsignedIntArrayMap"),
+ 0x08829: (144, "ExternalFloatArrayMap"),
+ 0x08851: (145, "ExternalDoubleArrayMap"),
+ 0x08879: (146, "ExternalPixelArrayMap"),
+ 0x088a1: (171, "NonStrictArgumentsElementsMap"),
+ 0x088c9: (171, "FunctionContextMap"),
+ 0x088f1: (171, "CatchContextMap"),
+ 0x08919: (171, "WithContextMap"),
+ 0x08941: (171, "BlockContextMap"),
+ 0x08969: (171, "ModuleContextMap"),
+ 0x08991: (171, "GlobalContextMap"),
+ 0x089b9: (173, "JSMessageObjectMap"),
+ 0x089e1: (135, "ForeignMap"),
+ 0x08a09: (178, "NeanderMap"),
+ 0x08a31: (162, "AllocationMementoMap"),
+ 0x08a59: (161, "AllocationSiteMap"),
+ 0x08a81: (165, "PolymorphicCodeCacheMap"),
+ 0x08aa9: (163, "ScriptMap"),
+ 0x08af9: (178, "ExternalMap"),
+ 0x08b21: (168, "BoxMap"),
+ 0x08b49: (150, "DeclaredAccessorDescriptorMap"),
+ 0x08b71: (151, "DeclaredAccessorInfoMap"),
+ 0x08b99: (152, "ExecutableAccessorInfoMap"),
+ 0x08bc1: (153, "AccessorPairMap"),
+ 0x08be9: (154, "AccessCheckInfoMap"),
+ 0x08c11: (155, "InterceptorInfoMap"),
+ 0x08c39: (156, "CallHandlerInfoMap"),
+ 0x08c61: (157, "FunctionTemplateInfoMap"),
+ 0x08c89: (158, "ObjectTemplateInfoMap"),
+ 0x08cb1: (159, "SignatureInfoMap"),
+ 0x08cd9: (160, "TypeSwitchInfoMap"),
+ 0x08d01: (164, "CodeCacheMap"),
+ 0x08d29: (166, "TypeFeedbackInfoMap"),
+ 0x08d51: (167, "AliasedArgumentsEntryMap"),
+ 0x08d79: (169, "DebugInfoMap"),
+ 0x08da1: (170, "BreakPointInfoMap"),
}
# List of known V8 objects.
@@ -250,6 +252,6 @@ KNOWN_OBJECTS = {
("OLD_DATA_SPACE", 0x082c9): "EmptyExternalPixelArray",
("OLD_DATA_SPACE", 0x082d5): "InfinityValue",
("OLD_DATA_SPACE", 0x082e1): "MinusZeroValue",
- ("CODE_SPACE", 0x11141): "JsConstructEntryCode",
- ("CODE_SPACE", 0x18da1): "JsEntryCode",
+ ("CODE_SPACE", 0x111a1): "JsConstructEntryCode",
+ ("CODE_SPACE", 0x18bc1): "JsEntryCode",
}
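These tables are regenerated for the new heap layout; the inserted CONSTANT_POOL_ARRAY_TYPE at 148 shifts every later instance type up by one, which accounts for the bulk of this diff. Scripts that consume the module for post-mortem heap inspection can resolve a map address to a name roughly like this hedged sketch:

  map_addr = 0x08329
  inst_type, map_name = KNOWN_MAPS[map_addr]
  print "%s is %s (%s)" % (hex(map_addr), map_name, INSTANCE_TYPES[inst_type])
  # prints: 0x8329 is ConstantPoolArrayMap (CONSTANT_POOL_ARRAY_TYPE)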