path: root/deps
author     Ryan Dahl <ry@tinyclouds.org>    2011-11-03 10:34:22 -0700
committer  Ryan Dahl <ry@tinyclouds.org>    2011-11-03 10:34:22 -0700
commit     0e9c1ca67399868e8d602e146dc51d84ad9fdc15 (patch)
tree       6171b2ab5bcad0697b60a75c30ac6c4d0674dfec /deps
parent     a6dbe0ff23a8d73cd747de30c426753ae743113a (diff)
download   android-node-v8-0e9c1ca67399868e8d602e146dc51d84ad9fdc15.tar.gz
           android-node-v8-0e9c1ca67399868e8d602e146dc51d84ad9fdc15.tar.bz2
           android-node-v8-0e9c1ca67399868e8d602e146dc51d84ad9fdc15.zip
Downgrade V8 to 3.6.4
Diffstat (limited to 'deps')
-rw-r--r--  deps/v8/ChangeLog | 71
-rw-r--r--  deps/v8/Makefile | 45
-rw-r--r--  deps/v8/SConstruct | 3
-rw-r--r--  deps/v8/benchmarks/spinning-balls/index.html | 11
-rw-r--r--  deps/v8/benchmarks/spinning-balls/splay-tree.js | 326
-rw-r--r--  deps/v8/benchmarks/spinning-balls/v.js | 387
-rw-r--r--  deps/v8/build/common.gypi | 12
-rw-r--r--  deps/v8/build/standalone.gypi | 31
-rw-r--r-- [-rwxr-xr-x]  deps/v8/include/v8-debug.h | 5
-rw-r--r--  deps/v8/include/v8.h | 42
-rw-r--r-- [-rwxr-xr-x]  deps/v8/src/SConscript | 2
-rw-r--r--  deps/v8/src/api.cc | 127
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 24
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 12
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 10
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 1118
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 598
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h | 245
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 8
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 10
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 82
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 34
-rw-r--r--  deps/v8/src/arm/frames-arm.h | 10
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 231
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 149
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 38
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 16
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 182
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h | 7
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 566
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 226
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc | 15
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 2
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 357
-rw-r--r--  deps/v8/src/array.js | 151
-rw-r--r--  deps/v8/src/assembler.cc | 56
-rw-r--r--  deps/v8/src/assembler.h | 30
-rw-r--r--  deps/v8/src/ast.cc | 154
-rw-r--r--  deps/v8/src/ast.h | 41
-rw-r--r--  deps/v8/src/bootstrapper.cc | 57
-rw-r--r--  deps/v8/src/builtins.cc | 147
-rw-r--r--  deps/v8/src/cached-powers.cc | 12
-rw-r--r--  deps/v8/src/code-stubs.cc | 45
-rw-r--r--  deps/v8/src/code-stubs.h | 117
-rw-r--r--  deps/v8/src/codegen.cc | 2
-rw-r--r--  deps/v8/src/compiler-intrinsics.h | 77
-rw-r--r--  deps/v8/src/compiler.cc | 13
-rw-r--r--  deps/v8/src/compiler.h | 8
-rw-r--r--  deps/v8/src/contexts.cc | 120
-rw-r--r--  deps/v8/src/contexts.h | 41
-rw-r--r--  deps/v8/src/conversions-inl.h | 2
-rw-r--r--  deps/v8/src/conversions.h | 2
-rw-r--r--  deps/v8/src/d8-debug.cc | 5
-rw-r--r--  deps/v8/src/d8.cc | 34
-rw-r--r--  deps/v8/src/debug.cc | 226
-rw-r--r--  deps/v8/src/debug.h | 90
-rw-r--r--  deps/v8/src/deoptimizer.cc | 70
-rw-r--r--  deps/v8/src/deoptimizer.h | 18
-rw-r--r--  deps/v8/src/disassembler.cc | 2
-rw-r--r--  deps/v8/src/elements.cc | 11
-rw-r--r--  deps/v8/src/execution.cc | 175
-rw-r--r--  deps/v8/src/execution.h | 13
-rw-r--r--  deps/v8/src/extensions/gc-extension.cc | 7
-rw-r--r--  deps/v8/src/factory.cc | 96
-rw-r--r--  deps/v8/src/factory.h | 32
-rw-r--r--  deps/v8/src/flag-definitions.h | 38
-rw-r--r--  deps/v8/src/frames-inl.h | 76
-rw-r--r--  deps/v8/src/frames.cc | 117
-rw-r--r--  deps/v8/src/frames.h | 80
-rw-r--r--  deps/v8/src/full-codegen.cc | 43
-rw-r--r--  deps/v8/src/full-codegen.h | 25
-rw-r--r--  deps/v8/src/globals.h | 4
-rw-r--r--  deps/v8/src/handles.cc | 19
-rw-r--r--  deps/v8/src/handles.h | 15
-rw-r--r--  deps/v8/src/heap-inl.h | 104
-rw-r--r--  deps/v8/src/heap-profiler.cc | 1
-rw-r--r--  deps/v8/src/heap.cc | 1531
-rw-r--r--  deps/v8/src/heap.h | 463
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 66
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 344
-rw-r--r--  deps/v8/src/hydrogen.cc | 437
-rw-r--r--  deps/v8/src/hydrogen.h | 29
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 26
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 87
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 88
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 1031
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 1126
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.h | 291
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 46
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 95
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 100
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 29
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 358
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 160
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 255
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h | 13
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 64
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 28
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 737
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 226
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 116
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 401
-rw-r--r--  deps/v8/src/ic-inl.h | 2
-rw-r--r--  deps/v8/src/ic.cc | 218
-rw-r--r--  deps/v8/src/ic.h | 35
-rw-r--r--  deps/v8/src/incremental-marking-inl.h | 155
-rw-r--r--  deps/v8/src/incremental-marking.cc | 818
-rw-r--r--  deps/v8/src/incremental-marking.h | 256
-rw-r--r--  deps/v8/src/isolate-inl.h | 15
-rw-r--r--  deps/v8/src/isolate.cc | 30
-rw-r--r--  deps/v8/src/isolate.h | 29
-rw-r--r--  deps/v8/src/json-parser.h | 2
-rw-r--r--  deps/v8/src/jsregexp.cc | 7
-rw-r--r--  deps/v8/src/jsregexp.h | 5
-rw-r--r--  deps/v8/src/lithium-allocator.cc | 19
-rw-r--r--  deps/v8/src/lithium.cc | 22
-rw-r--r--  deps/v8/src/lithium.h | 18
-rw-r--r--  deps/v8/src/liveedit.cc | 11
-rw-r--r--  deps/v8/src/liveobjectlist.cc | 14
-rw-r--r--  deps/v8/src/log.cc | 14
-rw-r--r--  deps/v8/src/log.h | 1
-rw-r--r--  deps/v8/src/macro-assembler.h | 59
-rw-r--r--  deps/v8/src/mark-compact-inl.h | 101
-rw-r--r--  deps/v8/src/mark-compact.cc | 3729
-rw-r--r--  deps/v8/src/mark-compact.h | 640
-rw-r--r--  deps/v8/src/messages.cc | 14
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h | 36
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 12
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc | 1151
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 1060
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h | 246
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc | 8
-rw-r--r--  deps/v8/src/mips/codegen-mips.h | 15
-rw-r--r--  deps/v8/src/mips/debug-mips.cc | 83
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc | 3
-rw-r--r--  deps/v8/src/mips/frames-mips.h | 14
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 249
-rw-r--r--  deps/v8/src/mips/ic-mips.cc | 180
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 1033
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 418
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc | 26
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 410
-rw-r--r--  deps/v8/src/mksnapshot.cc | 3
-rw-r--r--  deps/v8/src/objects-debug.cc | 25
-rw-r--r--  deps/v8/src/objects-inl.h | 660
-rw-r--r--  deps/v8/src/objects-printer.cc | 121
-rw-r--r--  deps/v8/src/objects-visiting-inl.h | 143
-rw-r--r--  deps/v8/src/objects-visiting.cc | 3
-rw-r--r--  deps/v8/src/objects-visiting.h | 138
-rw-r--r--  deps/v8/src/objects.cc | 1602
-rw-r--r--  deps/v8/src/objects.h | 691
-rw-r--r--  deps/v8/src/parser.cc | 235
-rw-r--r--  deps/v8/src/parser.h | 15
-rw-r--r--  deps/v8/src/platform-freebsd.cc | 108
-rw-r--r--  deps/v8/src/platform-linux.cc | 158
-rw-r--r--  deps/v8/src/platform-macos.cc | 137
-rw-r--r--  deps/v8/src/platform-openbsd.cc | 5
-rw-r--r--  deps/v8/src/platform-posix.cc | 28
-rw-r--r--  deps/v8/src/platform-solaris.cc | 141
-rw-r--r--  deps/v8/src/platform-win32.cc | 91
-rw-r--r--  deps/v8/src/platform.h | 62
-rw-r--r--  deps/v8/src/preparser.cc | 72
-rw-r--r--  deps/v8/src/preparser.h | 4
-rw-r--r--  deps/v8/src/prettyprinter.cc | 26
-rw-r--r--  deps/v8/src/profile-generator.cc | 70
-rw-r--r--  deps/v8/src/profile-generator.h | 11
-rw-r--r--  deps/v8/src/property.h | 21
-rw-r--r--  deps/v8/src/proxy.js | 23
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.cc | 4
-rw-r--r--  deps/v8/src/regexp.js | 11
-rw-r--r--  deps/v8/src/runtime-profiler.cc | 4
-rw-r--r--  deps/v8/src/runtime.cc | 962
-rw-r--r--  deps/v8/src/runtime.h | 4
-rw-r--r--  deps/v8/src/runtime.js | 18
-rw-r--r--  deps/v8/src/scanner.cc | 8
-rw-r--r--  deps/v8/src/scanner.h | 10
-rw-r--r--  deps/v8/src/scopeinfo.cc | 20
-rw-r--r--  deps/v8/src/scopeinfo.h | 79
-rw-r--r--  deps/v8/src/scopes.cc | 52
-rw-r--r--  deps/v8/src/scopes.h | 12
-rw-r--r--  deps/v8/src/serialize.cc | 133
-rw-r--r--  deps/v8/src/serialize.h | 3
-rw-r--r--  deps/v8/src/spaces-inl.h | 528
-rw-r--r--  deps/v8/src/spaces.cc | 2936
-rw-r--r--  deps/v8/src/spaces.h | 2577
-rw-r--r--  deps/v8/src/splay-tree-inl.h | 6
-rw-r--r--  deps/v8/src/store-buffer-inl.h | 79
-rw-r--r--  deps/v8/src/store-buffer.cc | 694
-rw-r--r--  deps/v8/src/store-buffer.h | 248
-rw-r--r--  deps/v8/src/string.js | 6
-rw-r--r--  deps/v8/src/strtod.cc | 1
-rw-r--r--  deps/v8/src/stub-cache.cc | 69
-rw-r--r--  deps/v8/src/stub-cache.h | 13
-rw-r--r--  deps/v8/src/token.h | 4
-rw-r--r--  deps/v8/src/type-info.cc | 58
-rw-r--r--  deps/v8/src/type-info.h | 9
-rw-r--r--  deps/v8/src/uri.js | 37
-rw-r--r--  deps/v8/src/utils.h | 18
-rw-r--r--  deps/v8/src/v8-counters.h | 9
-rw-r--r--  deps/v8/src/v8.cc | 16
-rw-r--r--  deps/v8/src/v8.h | 12
-rw-r--r--  deps/v8/src/v8globals.h | 69
-rw-r--r--  deps/v8/src/v8natives.js | 82
-rw-r--r--  deps/v8/src/v8utils.h | 10
-rw-r--r--  deps/v8/src/variables.cc | 4
-rw-r--r--  deps/v8/src/variables.h | 36
-rw-r--r--  deps/v8/src/version.cc | 6
-rw-r--r--  deps/v8/src/win32-headers.h | 1
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 26
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 4
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 15
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 1077
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 499
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h | 277
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 8
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 105
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 32
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 219
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 130
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 196
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h | 10
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 34
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 28
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 620
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 252
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 22
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 299
-rw-r--r--  deps/v8/test/cctest/cctest.gyp | 9
-rw-r--r--  deps/v8/test/cctest/cctest.status | 14
-rw-r--r--  deps/v8/test/cctest/test-accessors.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-alloc.cc | 40
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 444
-rw-r--r--  deps/v8/test/cctest/test-assembler-ia32.cc | 26
-rw-r--r--  deps/v8/test/cctest/test-compiler.cc | 11
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 41
-rw-r--r--  deps/v8/test/cctest/test-decls.cc | 40
-rw-r--r--  deps/v8/test/cctest/test-disasm-ia32.cc | 48
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-heap.cc | 202
-rw-r--r--  deps/v8/test/cctest/test-log.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-mark-compact.cc | 29
-rwxr-xr-x  deps/v8/test/cctest/test-parsing.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-profile-generator.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-reloc-info.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-serialize.cc | 37
-rw-r--r--  deps/v8/test/cctest/test-spaces.cc | 90
-rw-r--r--  deps/v8/test/cctest/test-strings.cc | 31
-rw-r--r--  deps/v8/test/cctest/test-threads.cc | 22
-rw-r--r--  deps/v8/test/cctest/test-weakmaps.cc | 35
-rw-r--r--  deps/v8/test/es5conform/es5conform.status | 34
-rw-r--r--  deps/v8/test/mjsunit/array-tostring.js | 159
-rw-r--r--  deps/v8/test/mjsunit/assert-opt-and-deopt.js | 6
-rw-r--r--  deps/v8/test/mjsunit/bugs/harmony/debug-blockscopes.js | 2
-rw-r--r--  deps/v8/test/mjsunit/compiler/inline-context-slots.js | 49
-rw-r--r--  deps/v8/test/mjsunit/const-redecl.js | 62
-rw-r--r--  deps/v8/test/mjsunit/element-kind.js | 179
-rw-r--r--  deps/v8/test/mjsunit/global-const-var-conflicts.js | 10
-rw-r--r--  deps/v8/test/mjsunit/harmony/block-conflicts.js | 2
-rw-r--r--  deps/v8/test/mjsunit/harmony/block-leave.js | 2
-rw-r--r--  deps/v8/test/mjsunit/harmony/block-let-crankshaft.js | 2
-rw-r--r--  deps/v8/test/mjsunit/harmony/block-let-declaration.js | 57
-rw-r--r--  deps/v8/test/mjsunit/harmony/block-let-semantics.js | 27
-rw-r--r--  deps/v8/test/mjsunit/harmony/block-scoping.js | 2
-rw-r--r--  deps/v8/test/mjsunit/harmony/debug-blockscopes.js | 2
-rw-r--r--  deps/v8/test/mjsunit/harmony/debug-evaluate-blockscopes.js | 2
-rw-r--r--  deps/v8/test/mjsunit/harmony/proxies-function.js | 382
-rw-r--r--  deps/v8/test/mjsunit/harmony/proxies-hash.js | 66
-rw-r--r--  deps/v8/test/mjsunit/harmony/proxies.js | 844
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 6
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-100409.js (renamed from deps/v8/test/mjsunit/compiler/regress-96989.js) | 32
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1170.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1213575.js | 11
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1217.js | 50
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1415.js | 42
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1521.js | 3
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1639-2.js | 93
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1692.js | 89
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1708.js | 63
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1711.js | 38
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1713.js | 127
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1748.js | 35
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1757.js | 32
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-877615.js | 12
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-94873.js | 78
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-98773.js | 39
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-99167.js | 33
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-deopt-gc.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/short-circuit.js | 32
-rw-r--r--  deps/v8/test/mjsunit/string-slices-regexp.js | 2
-rwxr-xr-x  deps/v8/test/mjsunit/string-slices.js | 17
-rw-r--r--  deps/v8/test/mjsunit/undeletable-functions.js | 4
-rw-r--r--  deps/v8/test/mozilla/mozilla.status | 9
-rw-r--r--  deps/v8/test/preparser/strict-identifiers.pyt | 55
-rw-r--r--  deps/v8/test/sputnik/sputnik.status | 16
-rw-r--r--  deps/v8/test/test262/README | 4
-rw-r--r--  deps/v8/test/test262/test262.status | 1312
-rw-r--r--  deps/v8/test/test262/testcfg.py | 34
-rwxr-xr-x  deps/v8/tools/gc-nvp-trace-processor.py | 59
-rw-r--r--  deps/v8/tools/gcmole/gccause.lua | 2
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 14
-rwxr-xr-x  deps/v8/tools/linux-tick-processor | 31
-rwxr-xr-x  deps/v8/tools/ll_prof.py | 16
-rw-r--r--  deps/v8/tools/logreader.js | 7
-rwxr-xr-x  deps/v8/tools/presubmit.py | 7
-rwxr-xr-x  deps/v8/tools/push-to-trunk.sh | 12
-rwxr-xr-x  deps/v8/tools/test-wrapper-gypbuild.py | 30
307 files changed, 17100 insertions, 32350 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index a95f3cc34a..99495dd46b 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,74 +1,3 @@
-2011-10-13: Version 3.7.0
-
- Fixed array handling for Object.defineOwnProperty (ES5 conformance).
-
- Fixed issue 1757 (string slices of external strings).
-
- Fixed issue 1759 (ARM).
-
- Added flag --noclever-optimizations to disable some things that
- caused trouble in the past.
-
- Added flag --stress-compaction for testing.
-
- Added flag --harmony to activate all experimental Harmony features.
-
-
-2011-10-10: Version 3.6.6
-
- Added a GC pause visualization tool.
-
- Added presubmit=no and werror=no flags to Makefile.
-
- ES5/Test262 conformance improvements.
-
- Fixed compilation issues with GCC 4.5.x (issue 1743).
-
- Bug fixes and performance improvements on all platforms.
-
-
-2011-10-05: Version 3.6.5
-
- New incremental garbage collector.
-
- Removed the hard heap size limit (soft heap size limit is still
- 700/1400Mbytes by default).
-
- Implemented ES5 generic Array.prototype.toString (Issue 1361).
-
- V8 now allows surrogate pair codes in decodeURIComponent (Issue 1415).
-
- Fixed x64 RegExp start-of-string bug (Issues 1746, 1748).
-
- Fixed propertyIsEnumerable for numeric properties (Issue 1692).
-
- Fixed the MinGW and Windows 2000 builds.
-
- Fixed "Prototype chain is not searched if named property handler does
- not set a property" (Issue 1636).
-
- Made the RegExp.prototype object be a RegExp object (Issue 1217).
-
- Disallowed future reserved words as labels in strict mode.
-
- Fixed string split to correctly coerce the separator to a string
- (Issue 1711).
-
- API: Added an optional source length field to the Extension
- constructor.
-
- API: Added Debug::DisableAgent to match existing Debug::EnableAgent
- (Issue 1573).
-
- Added "native" target to Makefile for the benefit of Linux distros.
-
- Fixed: debugger stops stepping outside evaluate (Issue 1639).
-
- More work on ES-Harmony proxies. Still hidden behind a flag.
-
- Bug fixes and performance improvements on all platforms.
-
-
2011-09-15: Version 3.6.4
Fixed d8's broken readline history.
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 76f45d7f2e..a7b27317a3 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -32,7 +32,6 @@ LINK ?= "g++"
OUTDIR ?= out
TESTJOBS ?= -j16
GYPFLAGS ?=
-TESTFLAGS ?=
# Special build flags. Use them like this: "make library=shared"
@@ -51,10 +50,6 @@ endif
ifeq ($(disassembler), on)
GYPFLAGS += -Dv8_enable_disassembler=1
endif
-# objectprint=on
-ifeq ($(objectprint), on)
- GYPFLAGS += -Dv8_object_print=1
-endif
# snapshot=off
ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false'
@@ -77,21 +72,12 @@ endif
ifdef soname_version
GYPFLAGS += -Dsoname_version=$(soname_version)
endif
-# werror=no
-ifeq ($(werror), no)
- GYPFLAGS += -Dwerror=''
-endif
-# presubmit=no
-ifeq ($(presubmit), no)
- TESTFLAGS += --no-presubmit
-endif
# ----------------- available targets: --------------------
# - "dependencies": pulls in external dependencies (currently: GYP)
# - any arch listed in ARCHES (see below)
# - any mode listed in MODES
# - every combination <arch>.<mode>, e.g. "ia32.release"
-# - "native": current host's architecture, release mode
# - any of the above with .check appended, e.g. "ia32.release.check"
# - default (no target specified): build all ARCHES and MODES
# - "check": build all targets and run all tests
@@ -117,7 +103,7 @@ CHECKS = $(addsuffix .check,$(BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
-.PHONY: all check clean dependencies $(ENVFILE).new native \
+.PHONY: all check clean dependencies $(ENVFILE).new \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES))
@@ -138,31 +124,21 @@ $(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@)
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
-native: $(OUTDIR)/Makefile-native
- @$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
- CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
- builddir="$(shell pwd)/$(OUTDIR)/$@"
-
# Test targets.
check: all
- @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
- $(TESTFLAGS)
+ @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)
$(addsuffix .check,$(MODES)): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --mode=$(basename $@) $(TESTFLAGS)
+ --mode=$(basename $@)
$(addsuffix .check,$(ARCHES)): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch=$(basename $@) $(TESTFLAGS)
+ --arch=$(basename $@)
$(CHECKS): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch-and-mode=$(basename $@) $(TESTFLAGS)
-
-native.check: native
- @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
- --arch-and-mode=. $(TESTFLAGS)
+ --arch-and-mode=$(basename $@)
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean,$(ARCHES)):
@@ -171,12 +147,7 @@ $(addsuffix .clean,$(ARCHES)):
rm -rf $(OUTDIR)/$(basename $@).debug
find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
-native.clean:
- rm -f $(OUTDIR)/Makefile-native
- rm -rf $(OUTDIR)/native
- find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
-
-clean: $(addsuffix .clean,$(ARCHES)) native.clean
+clean: $(addsuffix .clean,$(ARCHES))
# GYP file generation targets.
$(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
@@ -194,10 +165,6 @@ $(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE)
-Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
-S-arm $(GYPFLAGS)
-$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
- build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
- -Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
-
# Replaces the old with the new environment file if they're different, which
# will trigger GYP to regenerate Makefiles.
$(ENVFILE): $(ENVFILE).new
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 1dcdce4a8c..f9c33caae5 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -288,6 +288,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
+ '-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
@@ -381,7 +382,7 @@ MKSNAPSHOT_EXTRA_FLAGS = {
DTOA_EXTRA_FLAGS = {
'gcc': {
'all': {
- 'WARNINGFLAGS': ['-Wno-uninitialized'],
+ 'WARNINGFLAGS': ['-Werror', '-Wno-uninitialized'],
'CCFLAGS': GCC_DTOA_EXTRA_CCFLAGS
}
},
diff --git a/deps/v8/benchmarks/spinning-balls/index.html b/deps/v8/benchmarks/spinning-balls/index.html
deleted file mode 100644
index d01f31f373..0000000000
--- a/deps/v8/benchmarks/spinning-balls/index.html
+++ /dev/null
@@ -1,11 +0,0 @@
-<html>
-<head>
- <style>
- body { text-align: center; }
- </style>
-</head>
-<body>
- <script type="text/javascript" src="splay-tree.js"></script>
- <script type="text/javascript" src="v.js"></script>
-</body>
-</html>
diff --git a/deps/v8/benchmarks/spinning-balls/splay-tree.js b/deps/v8/benchmarks/spinning-balls/splay-tree.js
deleted file mode 100644
index a88e4cbce1..0000000000
--- a/deps/v8/benchmarks/spinning-balls/splay-tree.js
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/**
- * Constructs a Splay tree. A splay tree is a self-balancing binary
- * search tree with the additional property that recently accessed
- * elements are quick to access again. It performs basic operations
- * such as insertion, look-up and removal in O(log(n)) amortized time.
- *
- * @constructor
- */
-function SplayTree() {
-};
-
-
-/**
- * Pointer to the root node of the tree.
- *
- * @type {SplayTree.Node}
- * @private
- */
-SplayTree.prototype.root_ = null;
-
-
-/**
- * @return {boolean} Whether the tree is empty.
- */
-SplayTree.prototype.isEmpty = function() {
- return !this.root_;
-};
-
-
-/**
- * Inserts a node into the tree with the specified key and value if
- * the tree does not already contain a node with the specified key. If
- * the value is inserted, it becomes the root of the tree.
- *
- * @param {number} key Key to insert into the tree.
- * @param {*} value Value to insert into the tree.
- */
-SplayTree.prototype.insert = function(key, value) {
- if (this.isEmpty()) {
- this.root_ = new SplayTree.Node(key, value);
- return;
- }
- // Splay on the key to move the last node on the search path for
- // the key to the root of the tree.
- this.splay_(key);
- if (this.root_.key == key) {
- return;
- }
- var node = new SplayTree.Node(key, value);
- if (key > this.root_.key) {
- node.left = this.root_;
- node.right = this.root_.right;
- this.root_.right = null;
- } else {
- node.right = this.root_;
- node.left = this.root_.left;
- this.root_.left = null;
- }
- this.root_ = node;
-};
-
-
-/**
- * Removes a node with the specified key from the tree if the tree
- * contains a node with this key. The removed node is returned. If the
- * key is not found, an exception is thrown.
- *
- * @param {number} key Key to find and remove from the tree.
- * @return {SplayTree.Node} The removed node.
- */
-SplayTree.prototype.remove = function(key) {
- if (this.isEmpty()) {
- throw Error('Key not found: ' + key);
- }
- this.splay_(key);
- if (this.root_.key != key) {
- throw Error('Key not found: ' + key);
- }
- var removed = this.root_;
- if (!this.root_.left) {
- this.root_ = this.root_.right;
- } else {
- var right = this.root_.right;
- this.root_ = this.root_.left;
- // Splay to make sure that the new root has an empty right child.
- this.splay_(key);
- // Insert the original right child as the right child of the new
- // root.
- this.root_.right = right;
- }
- return removed;
-};
-
-
-/**
- * Returns the node having the specified key or null if the tree doesn't contain
- * a node with the specified key.
- *
- * @param {number} key Key to find in the tree.
- * @return {SplayTree.Node} Node having the specified key.
- */
-SplayTree.prototype.find = function(key) {
- if (this.isEmpty()) {
- return null;
- }
- this.splay_(key);
- return this.root_.key == key ? this.root_ : null;
-};
-
-
-/**
- * @return {SplayTree.Node} Node having the maximum key value.
- */
-SplayTree.prototype.findMax = function(opt_startNode) {
- if (this.isEmpty()) {
- return null;
- }
- var current = opt_startNode || this.root_;
- while (current.right) {
- current = current.right;
- }
- return current;
-};
-
-
-/**
- * @return {SplayTree.Node} Node having the maximum key value that
- * is less than the specified key value.
- */
-SplayTree.prototype.findGreatestLessThan = function(key) {
- if (this.isEmpty()) {
- return null;
- }
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- this.splay_(key);
- // Now the result is either the root node or the greatest node in
- // the left subtree.
- if (this.root_.key < key) {
- return this.root_;
- } else if (this.root_.left) {
- return this.findMax(this.root_.left);
- } else {
- return null;
- }
-};
-
-
-/**
- * @return {Array<*>} An array containing all the keys of tree's nodes.
- */
-SplayTree.prototype.exportKeys = function() {
- var result = [];
- if (!this.isEmpty()) {
- this.root_.traverse_(function(node) { result.push(node.key); });
- }
- return result;
-};
-
-
-/**
- * Perform the splay operation for the given key. Moves the node with
- * the given key to the top of the tree. If no node has the given
- * key, the last node on the search path is moved to the top of the
- * tree. This is the simplified top-down splaying algorithm from:
- * "Self-adjusting Binary Search Trees" by Sleator and Tarjan
- *
- * @param {number} key Key to splay the tree on.
- * @private
- */
-SplayTree.prototype.splay_ = function(key) {
- if (this.isEmpty()) {
- return;
- }
- // Create a dummy node. The use of the dummy node is a bit
- // counter-intuitive: The right child of the dummy node will hold
- // the L tree of the algorithm. The left child of the dummy node
- // will hold the R tree of the algorithm. Using a dummy node, left
- // and right will always be nodes and we avoid special cases.
- var dummy, left, right;
- dummy = left = right = new SplayTree.Node(null, null);
- var current = this.root_;
- while (true) {
- if (key < current.key) {
- if (!current.left) {
- break;
- }
- if (key < current.left.key) {
- // Rotate right.
- var tmp = current.left;
- current.left = tmp.right;
- tmp.right = current;
- current = tmp;
- if (!current.left) {
- break;
- }
- }
- // Link right.
- right.left = current;
- right = current;
- current = current.left;
- } else if (key > current.key) {
- if (!current.right) {
- break;
- }
- if (key > current.right.key) {
- // Rotate left.
- var tmp = current.right;
- current.right = tmp.left;
- tmp.left = current;
- current = tmp;
- if (!current.right) {
- break;
- }
- }
- // Link left.
- left.right = current;
- left = current;
- current = current.right;
- } else {
- break;
- }
- }
- // Assemble.
- left.right = current.left;
- right.left = current.right;
- current.left = dummy.right;
- current.right = dummy.left;
- this.root_ = current;
-};
-
-
-/**
- * Constructs a Splay tree node.
- *
- * @param {number} key Key.
- * @param {*} value Value.
- */
-SplayTree.Node = function(key, value) {
- this.key = key;
- this.value = value;
-};
-
-
-/**
- * @type {SplayTree.Node}
- */
-SplayTree.Node.prototype.left = null;
-
-
-/**
- * @type {SplayTree.Node}
- */
-SplayTree.Node.prototype.right = null;
-
-
-/**
- * Performs an ordered traversal of the subtree starting at
- * this SplayTree.Node.
- *
- * @param {function(SplayTree.Node)} f Visitor function.
- * @private
- */
-SplayTree.Node.prototype.traverse_ = function(f) {
- var current = this;
- while (current) {
- var left = current.left;
- if (left) left.traverse_(f);
- f(current);
- current = current.right;
- }
-};
-
-SplayTree.prototype.traverseBreadthFirst = function (f) {
- if (f(this.root_.value)) return;
-
- var stack = [this.root_];
- var length = 1;
-
- while (length > 0) {
- var new_stack = new Array(stack.length * 2);
- var new_length = 0;
- for (var i = 0; i < length; i++) {
- var n = stack[i];
- var l = n.left;
- var r = n.right;
- if (l) {
- if (f(l.value)) return;
- new_stack[new_length++] = l;
- }
- if (r) {
- if (f(r.value)) return;
- new_stack[new_length++] = r;
- }
- }
- stack = new_stack;
- length = new_length;
- }
-};
diff --git a/deps/v8/benchmarks/spinning-balls/v.js b/deps/v8/benchmarks/spinning-balls/v.js
deleted file mode 100644
index 87366d9393..0000000000
--- a/deps/v8/benchmarks/spinning-balls/v.js
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * This function provides requestAnimationFrame in a cross browser way.
- * http://paulirish.com/2011/requestanimationframe-for-smart-animating/
- */
-if ( !window.requestAnimationFrame ) {
- window.requestAnimationFrame = ( function() {
- return window.webkitRequestAnimationFrame ||
- window.mozRequestAnimationFrame ||
- window.oRequestAnimationFrame ||
- window.msRequestAnimationFrame ||
- function(callback, element) {
- window.setTimeout( callback, 1000 / 60 );
- };
- } )();
-}
-
-var kNPoints = 8000;
-var kNModifications = 20;
-var kNVisiblePoints = 200;
-var kDecaySpeed = 20;
-
-var kPointRadius = 4;
-var kInitialLifeForce = 100;
-
-var livePoints = void 0;
-var dyingPoints = void 0;
-var scene = void 0;
-var renderingStartTime = void 0;
-var scene = void 0;
-var pausePlot = void 0;
-var splayTree = void 0;
-
-
-function Point(x, y, z, payload) {
- this.x = x;
- this.y = y;
- this.z = z;
-
- this.next = null;
- this.prev = null;
- this.payload = payload;
- this.lifeForce = kInitialLifeForce;
-}
-
-
-Point.prototype.color = function () {
- return "rgba(0, 0, 0, " + (this.lifeForce / kInitialLifeForce) + ")";
-};
-
-
-Point.prototype.decay = function () {
- this.lifeForce -= kDecaySpeed;
- return this.lifeForce <= 0;
-};
-
-
-function PointsList() {
- this.head = null;
- this.count = 0;
-}
-
-
-PointsList.prototype.add = function (point) {
- if (this.head !== null) this.head.prev = point;
- point.next = this.head;
- this.head = point;
- this.count++;
-}
-
-
-PointsList.prototype.remove = function (point) {
- if (point.next !== null) {
- point.next.prev = point.prev;
- }
- if (point.prev !== null) {
- point.prev.next = point.next;
- } else {
- this.head = point.next;
- }
- this.count--;
-}
-
-
-function GeneratePayloadTree(depth, tag) {
- if (depth == 0) {
- return {
- array : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
- string : 'String for key ' + tag + ' in leaf node'
- };
- } else {
- return {
- left: GeneratePayloadTree(depth - 1, tag),
- right: GeneratePayloadTree(depth - 1, tag)
- };
- }
-}
-
-
-// To make the benchmark results predictable, we replace Math.random
-// with a 100% deterministic alternative.
-Math.random = (function() {
- var seed = 49734321;
- return function() {
- // Robert Jenkins' 32 bit integer hash function.
- seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
- seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
- seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
- seed = ((seed + 0xd3a2646c) ^ (seed << 9)) & 0xffffffff;
- seed = ((seed + 0xfd7046c5) + (seed << 3)) & 0xffffffff;
- seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
- return (seed & 0xfffffff) / 0x10000000;
- };
-})();
-
-
-function GenerateKey() {
- // The benchmark framework guarantees that Math.random is
- // deterministic; see base.js.
- return Math.random();
-}
-
-function CreateNewPoint() {
- // Insert new node with a unique key.
- var key;
- do { key = GenerateKey(); } while (splayTree.find(key) != null);
-
- var point = new Point(Math.random() * 40 - 20,
- Math.random() * 40 - 20,
- Math.random() * 40 - 20,
- GeneratePayloadTree(5, "" + key));
-
- livePoints.add(point);
-
- splayTree.insert(key, point);
- return key;
-}
-
-function ModifyPointsSet() {
- if (livePoints.count < kNPoints) {
- for (var i = 0; i < kNModifications; i++) {
- CreateNewPoint();
- }
- } else if (kNModifications === 20) {
- kNModifications = 80;
- kDecay = 30;
- }
-
- for (var i = 0; i < kNModifications; i++) {
- var key = CreateNewPoint();
- var greatest = splayTree.findGreatestLessThan(key);
- if (greatest == null) {
- var point = splayTree.remove(key).value;
- } else {
- var point = splayTree.remove(greatest.key).value;
- }
- livePoints.remove(point);
- point.payload = null;
- dyingPoints.add(point);
- }
-}
-
-
-function PausePlot(width, height, size) {
- var canvas = document.createElement("canvas");
- canvas.width = this.width = width;
- canvas.height = this.height = height;
- document.body.appendChild(canvas);
-
- this.ctx = canvas.getContext('2d');
-
- this.maxPause = 0;
- this.size = size;
-
- // Initialize cyclic buffer for pauses.
- this.pauses = new Array(this.size);
- this.start = this.size;
- this.idx = 0;
-}
-
-
-PausePlot.prototype.addPause = function (p) {
- if (this.idx === this.size) {
- this.idx = 0;
- }
-
- if (this.idx === this.start) {
- this.start++;
- }
-
- if (this.start === this.size) {
- this.start = 0;
- }
-
- this.pauses[this.idx++] = p;
-};
-
-
-PausePlot.prototype.iteratePauses = function (f) {
- if (this.start < this.idx) {
- for (var i = this.start; i < this.idx; i++) {
- f.call(this, i - this.start, this.pauses[i]);
- }
- } else {
- for (var i = this.start; i < this.size; i++) {
- f.call(this, i - this.start, this.pauses[i]);
- }
-
- var offs = this.size - this.start;
- for (var i = 0; i < this.idx; i++) {
- f.call(this, i + offs, this.pauses[i]);
- }
- }
-};
-
-
-PausePlot.prototype.draw = function () {
- var first = null;
- this.iteratePauses(function (i, v) {
- if (first === null) {
- first = v;
- }
- this.maxPause = Math.max(v, this.maxPause);
- });
-
- var dx = this.width / this.size;
- var dy = this.height / this.maxPause;
-
- this.ctx.save();
- this.ctx.clearRect(0, 0, 480, 240);
- this.ctx.beginPath();
- this.ctx.moveTo(1, dy * this.pauses[this.start]);
- var p = first;
- this.iteratePauses(function (i, v) {
- var delta = v - p;
- var x = 1 + dx * i;
- var y = dy * v;
- this.ctx.lineTo(x, y);
- if (delta > 2 * (p / 3)) {
- this.ctx.font = "bold 12px sans-serif";
- this.ctx.textBaseline = "bottom";
- this.ctx.fillText(v + "ms", x + 2, y);
- }
- p = v;
- });
- this.ctx.strokeStyle = "black";
- this.ctx.stroke();
- this.ctx.restore();
-}
-
-
-function Scene(width, height) {
- var canvas = document.createElement("canvas");
- canvas.width = width;
- canvas.height = height;
- document.body.appendChild(canvas);
-
- this.ctx = canvas.getContext('2d');
- this.width = canvas.width;
- this.height = canvas.height;
-
- // Projection configuration.
- this.x0 = canvas.width / 2;
- this.y0 = canvas.height / 2;
- this.z0 = 100;
- this.f = 1000; // Focal length.
-
- // Camera is rotating around y-axis.
- this.angle = 0;
-}
-
-
-Scene.prototype.drawPoint = function (x, y, z, color) {
- // Rotate the camera around y-axis.
- var rx = x * Math.cos(this.angle) - z * Math.sin(this.angle);
- var ry = y;
- var rz = x * Math.sin(this.angle) + z * Math.cos(this.angle);
-
- // Perform perspective projection.
- var px = (this.f * rx) / (rz - this.z0) + this.x0;
- var py = (this.f * ry) / (rz - this.z0) + this.y0;
-
- this.ctx.save();
- this.ctx.fillStyle = color
- this.ctx.beginPath();
- this.ctx.arc(px, py, kPointRadius, 0, 2 * Math.PI, true);
- this.ctx.fill();
- this.ctx.restore();
-};
-
-
-Scene.prototype.drawDyingPoints = function () {
- var point_next = null;
- for (var point = dyingPoints.head; point !== null; point = point_next) {
- // Rotate the scene around y-axis.
- scene.drawPoint(point.x, point.y, point.z, point.color());
-
- point_next = point.next;
-
- // Decay the current point and remove it from the list
- // if it's life-force ran out.
- if (point.decay()) {
- dyingPoints.remove(point);
- }
- }
-};
-
-
-Scene.prototype.draw = function () {
- this.ctx.save();
- this.ctx.clearRect(0, 0, this.width, this.height);
- this.drawDyingPoints();
- this.ctx.restore();
-
- this.angle += Math.PI / 90.0;
-};
-
-
-function render() {
- if (typeof renderingStartTime === 'undefined') {
- renderingStartTime = Date.now();
- }
-
- ModifyPointsSet();
-
- scene.draw();
-
- var renderingEndTime = Date.now();
- var pause = renderingEndTime - renderingStartTime;
- pausePlot.addPause(pause);
- renderingStartTime = renderingEndTime;
-
- pausePlot.draw();
-
- div.innerHTML =
- livePoints.count + "/" + dyingPoints.count + " " +
- pause + "(max = " + pausePlot.maxPause + ") ms" ;
-
- // Schedule next frame.
- requestAnimationFrame(render);
-}
-
-
-function init() {
- livePoints = new PointsList;
- dyingPoints = new PointsList;
-
- splayTree = new SplayTree();
-
- scene = new Scene(640, 480);
-
- div = document.createElement("div");
- document.body.appendChild(div);
-
- pausePlot = new PausePlot(480, 240, 160);
-}
-
-
-init();
-render();
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index a6579ed9e1..4e896e019a 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -60,8 +60,6 @@
'v8_enable_disassembler%': 0,
- 'v8_object_print%': 0,
-
'v8_enable_gdbjit%': 0,
# Enable profiling support. Only required on Windows.
@@ -74,7 +72,6 @@
'v8_use_snapshot%': 'true',
'host_os%': '<(OS)',
'v8_use_liveobjectlist%': 'false',
- 'werror%': '-Werror',
# For a shared library build, results in "libv8-<(soname_version).so".
'soname_version%': '',
@@ -87,9 +84,6 @@
['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',],
}],
- ['v8_object_print==1', {
- 'defines': ['OBJECT_PRINT',],
- }],
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
@@ -190,9 +184,6 @@
}],
],
}],
- ['OS=="solaris"', {
- 'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
- }],
],
'configurations': {
'Debug': {
@@ -227,7 +218,7 @@
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
- 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
+ 'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor' ],
}],
],
@@ -270,6 +261,7 @@
}],
['OS=="win"', {
'msvs_configuration_attributes': {
+ 'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index f24d9f8341..cb5e133039 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -35,30 +35,25 @@
'msvs_multi_core_compile%': '1',
'variables': {
'variables': {
- 'variables': {
- 'conditions': [
- [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
- # This handles the Linux platforms we generally deal with. Anything
- # else gets passed through, which probably won't work very well; such
- # hosts should pass an explicit target_arch to gyp.
- 'host_arch%':
- '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
- }, { # OS!="linux" and OS!="freebsd" and OS!="openbsd"
- 'host_arch%': 'ia32',
- }],
- ],
- },
- 'host_arch%': '<(host_arch)',
- 'target_arch%': '<(host_arch)',
+ 'conditions': [
+ [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+ # This handles the Linux platforms we generally deal with. Anything
+ # else gets passed through, which probably won't work very well; such
+ # hosts should pass an explicit target_arch to gyp.
+ 'host_arch%':
+ '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
+ }, { # OS!="linux" and OS!="freebsd" and OS!="openbsd"
+ 'host_arch%': 'ia32',
+ }],
+ ],
},
'host_arch%': '<(host_arch)',
- 'target_arch%': '<(target_arch)',
+ 'target_arch%': '<(host_arch)',
'v8_target_arch%': '<(target_arch)',
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(v8_target_arch)',
- 'werror%': '-Werror',
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="x64" and host_arch!="x64")', {
@@ -79,7 +74,7 @@
'conditions': [
[ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'target_defaults': {
- 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
+ 'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
'-fno-exceptions', '-pedantic' ],
'ldflags': [ '-pthread', ],
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index 9e85dc462c..504cbfed59 100755..100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -340,11 +340,6 @@ class EXPORT Debug {
bool wait_for_connection = false);
/**
- * Disable the V8 builtin debug agent. The TCP/IP connection will be closed.
- */
- static void DisableAgent();
-
- /**
* Makes V8 process all pending debug messages.
*
* From V8 point of view all debug messages come asynchronously (e.g. from
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 73b7fbe4c4..4b7f6e735f 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -1171,8 +1171,7 @@ class String : public Primitive {
* Get the ExternalAsciiStringResource for an external ASCII string.
* Returns NULL if IsExternalAscii() doesn't return true.
*/
- V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource()
- const;
+ V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
static inline String* Cast(v8::Value* obj);
@@ -2452,42 +2451,24 @@ class V8EXPORT TypeSwitch : public Data {
// --- Extensions ---
-class V8EXPORT ExternalAsciiStringResourceImpl
- : public String::ExternalAsciiStringResource {
- public:
- ExternalAsciiStringResourceImpl() : data_(0), length_(0) {}
- ExternalAsciiStringResourceImpl(const char* data, size_t length)
- : data_(data), length_(length) {}
- const char* data() const { return data_; }
- size_t length() const { return length_; }
-
- private:
- const char* data_;
- size_t length_;
-};
/**
* Ignore
*/
class V8EXPORT Extension { // NOLINT
public:
- // Note that the strings passed into this constructor must live as long
- // as the Extension itself.
Extension(const char* name,
const char* source = 0,
int dep_count = 0,
- const char** deps = 0,
- int source_length = -1);
+ const char** deps = 0);
virtual ~Extension() { }
virtual v8::Handle<v8::FunctionTemplate>
GetNativeFunction(v8::Handle<v8::String> name) {
return v8::Handle<v8::FunctionTemplate>();
}
- const char* name() const { return name_; }
- size_t source_length() const { return source_length_; }
- const String::ExternalAsciiStringResource* source() const {
- return &source_; }
+ const char* name() { return name_; }
+ const char* source() { return source_; }
int dependency_count() { return dep_count_; }
const char** dependencies() { return deps_; }
void set_auto_enable(bool value) { auto_enable_ = value; }
@@ -2495,8 +2476,7 @@ class V8EXPORT Extension { // NOLINT
private:
const char* name_;
- size_t source_length_; // expected to initialize before source_
- ExternalAsciiStringResourceImpl source_;
+ const char* source_;
int dep_count_;
const char** deps_;
bool auto_enable_;
@@ -3518,9 +3498,9 @@ class V8EXPORT Context {
*
* v8::Locker is a scoped lock object. While it's
* active (i.e. between its construction and destruction) the current thread is
- * allowed to use the locked isolate. V8 guarantees that an isolate can be
- * locked by at most one thread at any time. In other words, the scope of a
- * v8::Locker is a critical section.
+ * allowed to use the locked isolate. V8 guarantees that an isolate can be locked
+ * by at most one thread at any time. In other words, the scope of a v8::Locker is
+ * a critical section.
*
* Sample usage:
* \code
@@ -3622,8 +3602,8 @@ class V8EXPORT Locker {
static void StopPreemption();
/**
- * Returns whether or not the locker for a given isolate, or default isolate
- * if NULL is given, is locked by the current thread.
+ * Returns whether or not the locker for a given isolate, or default isolate if NULL is given,
+ * is locked by the current thread.
*/
static bool IsLocked(Isolate* isolate = NULL);
@@ -3789,7 +3769,7 @@ class Internals {
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
- static const int kJSObjectType = 0xa6;
+ static const int kJSObjectType = 0xa3;
static const int kFirstNonstringType = 0x80;
static const int kForeignType = 0x85;
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index f3ae8078ba..52607f15c5 100755..100644
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -84,7 +84,6 @@ SOURCES = {
hydrogen.cc
hydrogen-instructions.cc
ic.cc
- incremental-marking.cc
inspector.cc
interpreter-irregexp.cc
isolate.cc
@@ -134,7 +133,6 @@ SOURCES = {
v8utils.cc
variables.cc
version.cc
- store-buffer.cc
zone.cc
extensions/gc-extension.cc
extensions/externalize-string-extension.cc
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index a03b7411c3..479be5af15 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -185,10 +185,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
int end_marker;
heap_stats.end_marker = &end_marker;
i::Isolate* isolate = i::Isolate::Current();
- // BUG(1718):
- // Don't use the take_snapshot since we don't support HeapIterator here
- // without doing a special GC.
- isolate->heap()->RecordStats(&heap_stats, false);
+ isolate->heap()->RecordStats(&heap_stats, take_snapshot);
i::V8::SetFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
{
@@ -504,12 +501,9 @@ void RegisterExtension(Extension* that) {
Extension::Extension(const char* name,
const char* source,
int dep_count,
- const char** deps,
- int source_length)
+ const char** deps)
: name_(name),
- source_length_(source_length >= 0 ?
- source_length : (source ? strlen(source) : 0)),
- source_(source, source_length_),
+ source_(source),
dep_count_(dep_count),
deps_(deps),
auto_enable_(false) { }
@@ -1413,7 +1407,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptData* ScriptData::PreCompile(const char* input, int length) {
i::Utf8ToUC16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+ return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
}
@@ -1422,10 +1416,10 @@ ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
if (str->IsExternalTwoByteString()) {
i::ExternalTwoByteStringUC16CharacterStream stream(
i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+ return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
} else {
i::GenericStringUC16CharacterStream stream(str, 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+ return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
}
}
@@ -1787,7 +1781,7 @@ v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> recv,
int argc,
- i::Handle<i::Object> argv[],
+ i::Object** argv[],
bool* has_pending_exception) {
i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
@@ -1804,10 +1798,10 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> data,
bool* has_pending_exception) {
- i::Handle<i::Object> argv[] = { data };
+ i::Object** argv[1] = { data.location() };
return CallV8HeapFunction(name,
i::Isolate::Current()->js_builtins_object(),
- ARRAY_SIZE(argv),
+ 1,
argv,
has_pending_exception);
}
@@ -2627,11 +2621,10 @@ bool Value::Equals(Handle<Value> that) const {
if (obj->IsJSObject() && other->IsJSObject()) {
return *obj == *other;
}
- i::Handle<i::Object> args[] = { other };
+ i::Object** args[1] = { other.location() };
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result =
- CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args,
- &has_pending_exception);
+ CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, false);
return *result == i::Smi::FromInt(i::EQUAL);
}
@@ -3211,10 +3204,21 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+ self,
+ i::JSObject::ALLOW_CREATION));
+ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- i::Handle<i::Object> result = i::SetHiddenProperty(self, key_obj, value_obj);
- return *result == *self;
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> obj = i::SetProperty(
+ hidden_props,
+ key_obj,
+ value_obj,
+ static_cast<PropertyAttributes>(None),
+ i::kNonStrictMode);
+ has_pending_exception = obj.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
+ return true;
}
@@ -3224,9 +3228,20 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
return Local<v8::Value>());
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+ self,
+ i::JSObject::OMIT_CREATION));
+ if (hidden_props->IsUndefined()) {
+ return v8::Local<v8::Value>();
+ }
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj));
- if (result->IsUndefined()) return v8::Local<v8::Value>();
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>());
+ if (result->IsUndefined()) {
+ return v8::Local<v8::Value>();
+ }
return Utils::ToLocal(result);
}
@@ -3237,9 +3252,15 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+ self,
+ i::JSObject::OMIT_CREATION));
+ if (hidden_props->IsUndefined()) {
+ return true;
+ }
+ i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props));
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- self->DeleteHiddenProperty(*key_obj);
- return true;
+ return i::DeleteProperty(js_obj, key_obj)->IsTrue();
}
@@ -3289,12 +3310,22 @@ void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
i::Handle<i::ExternalArray> array =
isolate->factory()->NewExternalArray(length, array_type, data);
- i::Handle<i::Map> external_array_map =
- isolate->factory()->GetElementsTransitionMap(
- object,
- GetElementsKindFromExternalArrayType(array_type));
-
- object->set_map(*external_array_map);
+ // If the object already has external elements, create a new, unique
+ // map if the element type is now changing, because assumptions about
+ // generated code based on the receiver's map will be invalid.
+ i::Handle<i::HeapObject> elements(object->elements());
+ bool cant_reuse_map =
+ elements->map()->IsUndefined() ||
+ !elements->map()->has_external_array_elements() ||
+ elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
+ if (cant_reuse_map) {
+ i::Handle<i::Map> external_array_map =
+ isolate->factory()->GetElementsTransitionMap(
+ i::Handle<i::Map>(object->map()),
+ GetElementsKindFromExternalArrayType(array_type),
+ object->HasFastProperties());
+ object->set_map(*external_array_map);
+ }
object->set_elements(*array);
}
@@ -3453,8 +3484,7 @@ bool v8::Object::IsCallable() {
}
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
- int argc,
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
v8::Handle<v8::Value> argv[]) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
@@ -3465,7 +3495,7 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
+ i::Object*** args = reinterpret_cast<i::Object***>(argv);
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
if (obj->IsJSFunction()) {
fun = i::Handle<i::JSFunction>::cast(obj);
@@ -3495,7 +3525,7 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
+ i::Object*** args = reinterpret_cast<i::Object***>(argv);
if (obj->IsJSFunction()) {
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
EXCEPTION_PREAMBLE(isolate);
@@ -3537,7 +3567,7 @@ Local<v8::Object> Function::NewInstance(int argc,
HandleScope scope;
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
+ i::Object*** args = reinterpret_cast<i::Object***>(argv);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::New(function, argc, args, &has_pending_exception);
@@ -3558,7 +3588,7 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
- i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
+ i::Object*** args = reinterpret_cast<i::Object***>(argv);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
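
Note: the hunks above (CallAsFunction, CallAsConstructor, Function::NewInstance,
Function::Call) all revert the argv plumbing from i::Handle<i::Object>* back to
i::Object***. Both forms rely on the STATIC_ASSERT in each hunk: a public
handle is a single pointer to a slot holding the object pointer, so an argv
array of handles can be viewed as an array of internal slots. Sketch as it
would read inside api.cc (Integer::New is assumed here, not part of the hunk):

    STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
    v8::Handle<v8::Value> argv[2] = { v8::Integer::New(1),
                                      v8::Integer::New(2) };
    // Same memory, reinterpreted as internal slots rather than public handles.
    i::Object*** args = reinterpret_cast<i::Object***>(argv);
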
@@ -3769,11 +3799,10 @@ bool v8::String::IsExternalAscii() const {
void v8::String::VerifyExternalStringResource(
v8::String::ExternalStringResource* value) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- const v8::String::ExternalStringResource* expected;
+ v8::String::ExternalStringResource* expected;
if (i::StringShape(*str).IsExternalTwoByte()) {
- const void* resource =
- i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
- expected = reinterpret_cast<const ExternalStringResource*>(resource);
+ void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+ expected = reinterpret_cast<ExternalStringResource*>(resource);
} else {
expected = NULL;
}
@@ -3781,7 +3810,7 @@ void v8::String::VerifyExternalStringResource(
}
-const v8::String::ExternalAsciiStringResource*
+v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(),
@@ -3789,9 +3818,8 @@ const v8::String::ExternalAsciiStringResource*
return NULL;
}
if (i::StringShape(*str).IsExternalAscii()) {
- const void* resource =
- i::Handle<i::ExternalAsciiString>::cast(str)->resource();
- return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
+ void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+ return reinterpret_cast<ExternalAsciiStringResource*>(resource);
} else {
return NULL;
}
@@ -3981,7 +4009,7 @@ bool v8::V8::IdleNotification() {
void v8::V8::LowMemoryNotification() {
i::Isolate* isolate = i::Isolate::Current();
if (!isolate->IsInitialized()) return;
- isolate->heap()->CollectAllAvailableGarbage();
+ isolate->heap()->CollectAllGarbage(true);
}
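
Note: the downgrade swaps CollectAllAvailableGarbage() for the older
CollectAllGarbage(true); in this revision the boolean is, presumably, the
force-compaction flag rather than a flags word. The embedder-side trigger for
this path (sketch):

    // Tell V8 the system is low on memory; with the restored code this ends
    // in a full, compacting collection.
    v8::V8::LowMemoryNotification();
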
@@ -4500,7 +4528,6 @@ bool v8::String::MakeExternal(
bool v8::String::CanMakeExternal() {
- if (!internal::FLAG_clever_optimizations) return false;
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
@@ -5453,12 +5480,6 @@ bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
wait_for_connection);
}
-
-void Debug::DisableAgent() {
- return i::Isolate::Current()->debugger()->StopAgent();
-}
-
-
void Debug::ProcessDebugMessages() {
i::Execution::ProcessDebugMesssages(true);
}
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 93cecf52b6..3e19a45385 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -77,11 +77,6 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
- if (host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
}
@@ -106,10 +101,6 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
- if (host() != NULL && target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
}
@@ -140,12 +131,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
- if (host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
- }
}
@@ -162,11 +147,6 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
}
@@ -215,7 +195,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
@@ -241,7 +221,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
+ StaticVisitor::VisitPointer(heap, target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
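
Note: every hunk in this header drops the incremental-marking write barrier
that the newer code emitted after patching a relocation target, and the two
Visit hunks revert VisitEmbeddedPointer back to a plain VisitPointer on the
target slot. For reference, the removed barrier followed this pattern (taken
from the deleted lines, shown here only to make the shape of the change clear):

    if (host() != NULL && IsCodeTarget(rmode_)) {
      Object* target_code = Code::GetCodeFromTargetAddress(target);
      host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
          host(), this, HeapObject::cast(target_code));
    }
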
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 329493a340..0ec36921ab 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -78,9 +78,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
void CpuFeatures::Probe() {
- unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
- CpuFeaturesImpliedByCompiler());
- ASSERT(supported_ == 0 || supported_ == standard_features);
+ ASSERT(!initialized_);
#ifdef DEBUG
initialized_ = true;
#endif
@@ -88,7 +86,8 @@ void CpuFeatures::Probe() {
// Get the features implied by the OS and the compiler settings. This is the
   // minimal set of features which is also allowed for generated code in the
// snapshot.
- supported_ |= standard_features;
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ supported_ |= CpuFeaturesImpliedByCompiler();
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
@@ -2506,8 +2505,7 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2539,7 +2537,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index d19b64da54..9a586936fe 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -1209,10 +1209,6 @@ class Assembler : public AssemblerBase {
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
- void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
- }
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
@@ -1267,6 +1263,12 @@ class Assembler : public AssemblerBase {
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+ // Read/patch instructions
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
+
// Decode branch instruction at pos and return branch target pos
int target_at(int pos);
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 32b7896a52..60d2081c29 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -582,11 +582,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&convert_argument);
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- }
+ __ EnterInternalFrame();
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ LeaveInternalFrame();
__ pop(function);
__ mov(argument, r0);
__ b(&argument_is_string);
@@ -602,11 +601,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- }
+ __ EnterInternalFrame();
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ __ LeaveInternalFrame();
__ Ret();
}
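
Note: this hunk and many of the ones below swap the newer FrameScope RAII
helper for the older explicit EnterInternalFrame()/LeaveInternalFrame() pair.
Judging by the hunks, the scope's destructor is what emitted the matching
teardown, so the generated prologue/epilogue stays essentially the same and
only the C++ bookkeeping changes. Side-by-side sketch (macro-assembler code,
identifiers as used elsewhere in this diff):

    // Newer style being removed: leaving the block tears the frame down.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(r0);
      __ CallRuntime(Runtime::kNumberAlloc, 0);
    }
    // Restored 3.6.4 style: enter/leave are emitted by hand and must balance.
    __ EnterInternalFrame();
    __ push(r0);
    __ CallRuntime(Runtime::kNumberAlloc, 0);
    __ LeaveInternalFrame();
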
@@ -619,12 +617,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- Label slow, non_function_call;
+ Label non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
+ __ b(ne, &non_function_call);
// Jump to the function-specific construct stub.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -633,19 +631,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
- // r2: object type
- Label do_call;
- __ bind(&slow);
- __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
__ bind(&non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0, RelocInfo::NONE));
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -661,329 +650,321 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Isolate* isolate = masm->isolate();
// Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve the two incoming parameters on the stack.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ push(r0); // Smi-tagged arguments count.
- __ push(r1); // Constructor function.
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
+ __ EnterConstructFrame();
+
+ // Preserve the two incoming parameters on the stack.
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ push(r0); // Smi-tagged arguments count.
+ __ push(r1); // Constructor function.
+
+ // Try to allocate the object without transitioning into C code. If any of the
+ // preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r2, Operand(debug_step_in_fp));
- __ ldr(r2, MemOperand(r2));
- __ tst(r2, r2);
- __ b(ne, &rt_call);
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ mov(r2, Operand(debug_step_in_fp));
+ __ ldr(r2, MemOperand(r2));
+ __ tst(r2, r2);
+ __ b(ne, &rt_call);
#endif
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &rt_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // r1: constructor function
- // r2: initial map
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ b(eq, &rt_call);
+ // Load the initial map and verify that it is in fact a map.
+ // r1: constructor function
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r2, &rt_call);
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ b(ne, &rt_call);
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
- __ ldrb(r4, constructor_count);
- __ sub(r4, r4, Operand(1), SetCC);
- __ strb(r4, constructor_count);
- __ b(ne, &allocate);
-
- __ Push(r1, r2);
-
- __ push(r1); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(r2);
- __ pop(r1);
-
- __ bind(&allocate);
- }
+ // Check that the constructor is not constructing a JSFunction (see comments
+ // in Runtime_NewObject in runtime.cc). In which case the initial map's
+ // instance type would be JS_FUNCTION_TYPE.
+ // r1: constructor function
+ // r2: initial map
+ __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+ __ b(eq, &rt_call);
+
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
+ __ ldrb(r4, constructor_count);
+ __ sub(r4, r4, Operand(1), SetCC);
+ __ strb(r4, constructor_count);
+ __ b(ne, &allocate);
+
+ __ Push(r1, r2);
+
+ __ push(r1); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(r2);
+ __ pop(r1);
+
+ __ bind(&allocate);
+ }
- // Now allocate the JSObject on the heap.
- // r1: constructor function
- // r2: initial map
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // r1: constructor function
- // r2: initial map
- // r3: object size
- // r4: JSObject (not tagged)
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Fill all the in-object properties with the appropriate filler.
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ // Now allocate the JSObject on the heap.
+ // r1: constructor function
+ // r2: initial map
+ __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to initial
+ // map and properties and elements are set to empty fixed array.
+ // r1: constructor function
+ // r2: initial map
+ // r3: object size
+ // r4: JSObject (not tagged)
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(r5, r4);
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // r1: constructor function
+ // r2: initial map
+ // r3: object size (in words)
+ // r4: JSObject (not tagged)
+ // r5: First in-object property of JSObject (not tagged)
+ __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ { Label loop, entry;
if (count_constructions) {
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
- // r0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(r0, r6);
- __ Assert(le, "Unexpected number of pre-allocated property fields.");
- }
- __ InitializeFieldsWithFiller(r5, r0, r7);
// To allow for truncation.
__ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
+ } else {
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
}
- __ InitializeFieldsWithFiller(r5, r6, r7);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- __ add(r4, r4, Operand(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not fall through to runtime call if it is.
- // r1: constructor function
- // r4: JSObject
- // r5: start of next object (not tagged)
- __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields
- // and in-object properties.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ add(r3, r3, Operand(r6));
- __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
- kBitsPerByte);
- __ sub(r3, r3, Operand(r6), SetCC);
-
- // Done if no extra properties are to be allocated.
- __ b(eq, &allocated);
- __ Assert(pl, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: start of next object
- __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
- r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
- __ mov(r2, r5);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ mov(r0, Operand(r3, LSL, kSmiTagSize));
- __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
- // Initialize the fields to undefined.
- // r1: constructor function
- // r2: First element of FixedArray (not tagged)
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, "Undefined value not loaded.");
- }
- __ b(&entry);
- __ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r2, r6);
- __ b(lt, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // r1: constructor function
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
-
- // Continue with JSObject being successfully allocated
- // r1: constructor function
- // r4: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // r4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(r4, r5);
+ __ b(&entry);
+ __ bind(&loop);
+ __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
+ __ bind(&entry);
+ __ cmp(r5, r6);
+ __ b(lt, &loop);
}
- // Allocate the new receiver object using the runtime call.
- // r1: constructor function
- __ bind(&rt_call);
- __ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(r4, r0);
+ // Add the object tag to make the JSObject real, so that we can continue and
+ // jump into the continuation code at any time from now on. Any failures
+ // need to undo the allocation, so that the heap is in a consistent state
+ // and verifiable.
+ __ add(r4, r4, Operand(kHeapObjectTag));
- // Receiver for constructor call allocated.
+  // Check if a non-empty properties array is needed. Continue with the
+  // allocated object if not; fall through to the runtime call if it is.
+ // r1: constructor function
+ // r4: JSObject
+ // r5: start of next object (not tagged)
+ __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
+ // The field instance sizes contains both pre-allocated property fields and
+ // in-object properties.
+ __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+ __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
+ __ add(r3, r3, Operand(r6));
+ __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
+ __ sub(r3, r3, Operand(r6), SetCC);
+
+ // Done if no extra properties are to be allocated.
+ __ b(eq, &allocated);
+ __ Assert(pl, "Property allocation count failed.");
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // r1: constructor
+ // r3: number of elements in properties array
+ // r4: JSObject
+ // r5: start of next object
+ __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ AllocateInNewSpace(
+ r0,
+ r5,
+ r6,
+ r2,
+ &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // r1: constructor
+ // r3: number of elements in properties array
// r4: JSObject
- __ bind(&allocated);
- __ push(r4);
-
- // Push the function and the allocated receiver from the stack.
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, kPointerSize));
- __ push(r1); // Constructor function.
- __ push(r4); // Receiver.
-
- // Reload the number of arguments from the stack.
+ // r5: FixedArray (not tagged)
+ __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+ __ mov(r2, r5);
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+ __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
+
+ // Initialize the fields to undefined.
// r1: constructor function
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
-
- // Setup pointer to last argument.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Setup number of arguments for function call below
- __ mov(r0, Operand(r3, LSR, kSmiTagSize));
-
- // Copy arguments and receiver to the expression stack.
- // r0: number of arguments
- // r2: address of last argument (caller sp)
+ // r2: First element of FixedArray (not tagged)
+ // r3: number of elements in properties array
+ // r4: JSObject
+ // r5: FixedArray (not tagged)
+ __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ cmp(r7, r8);
+ __ Assert(eq, "Undefined value not loaded.");
+ }
+ __ b(&entry);
+ __ bind(&loop);
+ __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+ __ bind(&entry);
+ __ cmp(r2, r6);
+ __ b(lt, &loop);
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
// r1: constructor function
- // r3: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- Label loop, entry;
- __ b(&entry);
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
- __ push(ip);
- __ bind(&entry);
- __ sub(r3, r3, Operand(2), SetCC);
- __ b(ge, &loop);
+ // r4: JSObject
+ // r5: FixedArray (not tagged)
+ __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
- // Call the function.
- // r0: number of arguments
+ // Continue with JSObject being successfully allocated
// r1: constructor function
- if (is_api_function) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // r4: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // r4: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(r4, r5);
+ }
- // Pop the function from the stack.
- // sp[0]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ pop();
+ // Allocate the new receiver object using the runtime call.
+ // r1: constructor function
+ __ bind(&rt_call);
+ __ push(r1); // argument for Runtime_NewObject
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(r4, r0);
+
+ // Receiver for constructor call allocated.
+ // r4: JSObject
+ __ bind(&allocated);
+ __ push(r4);
+
+ // Push the function and the allocated receiver from the stack.
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ __ push(r1); // Constructor function.
+ __ push(r4); // Receiver.
+
+ // Reload the number of arguments from the stack.
+ // r1: constructor function
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
+
+ // Setup pointer to last argument.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Setup number of arguments for function call below
+ __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+
+ // Copy arguments and receiver to the expression stack.
+ // r0: number of arguments
+ // r2: address of last argument (caller sp)
+ // r1: constructor function
+ // r3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+ __ push(ip);
+ __ bind(&entry);
+ __ sub(r3, r3, Operand(2), SetCC);
+ __ b(ge, &loop);
- // Restore context from the frame.
- // r0: result
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-
- // Leave construct frame.
+ // Call the function.
+ // r0: number of arguments
+ // r1: constructor function
+ if (is_api_function) {
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
+ // Pop the function from the stack.
+ // sp[0]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ pop();
+
+ // Restore context from the frame.
+ // r0: result
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(r0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+ __ LeaveConstructFrame();
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
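
Note on the exit sequence above: r1 is reloaded with the smi-tagged argument
count, so the caller's arguments are popped with a shift of
kPointerSizeLog2 - 1 instead of kPointerSizeLog2. With the one-bit smi tag
used on 32-bit targets the arithmetic works out as follows (sketch; the extra
kPointerSize appears to drop the receiver slot pushed by the caller):

    // r1 = argc << kSmiTagSize = argc << 1              (smi-tagged count)
    // bytes to pop = argc * kPointerSize = argc << kPointerSizeLog2
    //              = (argc << 1) << (kPointerSizeLog2 - 1)
    __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));  // caller args
    __ add(sp, sp, Operand(kPointerSize));                   // one extra slot
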
@@ -1016,64 +997,63 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r4: argv
// r5-r7, cp may be clobbered
- // Clear the context before we push it when entering the internal frame.
+ // Clear the context before we push it when entering the JS frame.
__ mov(cp, Operand(0, RelocInfo::NONE));
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Set up the context from the function argument.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Set up the context from the function argument.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Set up the roots register.
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
- __ mov(r10, Operand(roots_address));
+ // Set up the roots register.
+ ExternalReference roots_address =
+ ExternalReference::roots_address(masm->isolate());
+ __ mov(r10, Operand(roots_address));
- // Push the function and the receiver onto the stack.
- __ push(r1);
- __ push(r2);
+ // Push the function and the receiver onto the stack.
+ __ push(r1);
+ __ push(r2);
- // Copy arguments to the stack in a loop.
- // r1: function
- // r3: argc
- // r4: argv, i.e. points to first arg
- Label loop, entry;
- __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
- // r2 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
- __ ldr(r0, MemOperand(r0)); // dereference handle
- __ push(r0); // push parameter
- __ bind(&entry);
- __ cmp(r4, r2);
- __ b(ne, &loop);
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ mov(r5, Operand(r4));
- __ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
- if (kR9Available == 1) {
- __ mov(r9, Operand(r4));
- }
+ // Copy arguments to the stack in a loop.
+ // r1: function
+ // r3: argc
+ // r4: argv, i.e. points to first arg
+ Label loop, entry;
+ __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+ // r2 points past last arg.
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
+ __ ldr(r0, MemOperand(r0)); // dereference handle
+ __ push(r0); // push parameter
+ __ bind(&entry);
+ __ cmp(r4, r2);
+ __ b(ne, &loop);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ mov(r5, Operand(r4));
+ __ mov(r6, Operand(r4));
+ __ mov(r7, Operand(r4));
+ if (kR9Available == 1) {
+ __ mov(r9, Operand(r4));
+ }
- // Invoke the code and pass argc as r0.
- __ mov(r0, Operand(r3));
- if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall());
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
- // Exit the JS frame and remove the parameters (except function), and
- // return.
- // Respect ABI stack constraint.
+ // Invoke the code and pass argc as r0.
+ __ mov(r0, Operand(r3));
+ if (is_construct) {
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
+
+ // Exit the JS frame and remove the parameters (except function), and return.
+ // Respect ABI stack constraint.
+ __ LeaveInternalFrame();
__ Jump(lr);
// r0: result
@@ -1092,27 +1072,26 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
+ // Preserve the function.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore saved function.
+ __ pop(r1);
- // Tear down internal frame.
- }
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ Jump(r2);
@@ -1121,27 +1100,26 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
+ // Preserve the function.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore saved function.
+ __ pop(r1);
- // Tear down internal frame.
- }
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ Jump(r2);
@@ -1150,13 +1128,12 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
- __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- }
+ __ EnterInternalFrame();
+ // Pass the function and deoptimization type to the runtime system.
+ __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(r0);
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ LeaveInternalFrame();
// Get the full codegen state from the stack and untag it -> r6.
__ ldr(r6, MemOperand(sp, 0 * kPointerSize));
@@ -1196,10 +1173,9 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
+ __ EnterInternalFrame();
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ __ LeaveInternalFrame();
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
__ Ret();
}
@@ -1215,11 +1191,10 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- }
+ __ EnterInternalFrame();
+ __ push(r0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ LeaveInternalFrame();
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
@@ -1301,23 +1276,17 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ b(ge, &shift_arguments);
__ bind(&convert_to_object);
+ __ EnterInternalFrame(); // In order to preserve argument count.
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
+ __ push(r0);
- {
- // Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
- __ push(r0);
-
- __ push(r2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(r2, r0);
-
- __ pop(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-
- // Exit the internal frame.
- }
+ __ push(r2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(r2, r0);
+ __ pop(r0);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ LeaveInternalFrame();
// Restore the function to r1, and the flag to r4.
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ mov(r4, Operand(0, RelocInfo::NONE));
@@ -1437,157 +1406,156 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kRecvOffset = 3 * kPointerSize;
const int kFunctionOffset = 4 * kPointerSize;
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
- // Make r2 the space we have left. The stack might already be overflowed
- // here which will cause r2 to become negative.
- __ sub(r2, sp, r2);
- // Check if the arguments will overflow the stack.
- __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ b(gt, &okay); // Signed comparison.
-
- // Out of stack space.
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ push(r1);
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
-
- // Push current limit and index.
- __ bind(&okay);
- __ push(r0); // limit
- __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
- __ push(r1);
-
- // Get the receiver.
- __ ldr(r0, MemOperand(fp, kRecvOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in r1.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Do not transform the receiver for strict mode functions.
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &push_receiver);
+ __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
+ __ push(r0);
+ __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ // Make r2 the space we have left. The stack might already be overflowed
+ // here which will cause r2 to become negative.
+ __ sub(r2, sp, r2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ b(gt, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ push(r1);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(r0, &call_to_object);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
+ // Push current limit and index.
+ __ bind(&okay);
+ __ push(r0); // limit
+ __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
+ __ push(r1);
- // Check if the receiver is already a JavaScript object.
- // r0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &push_receiver);
+ // Get the receiver.
+ __ ldr(r0, MemOperand(fp, kRecvOffset));
- // Convert the receiver to a regular object.
- // r0: receiver
- __ bind(&call_to_object);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ b(&push_receiver);
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &push_receiver);
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
- __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- // r0: receiver
- __ bind(&push_receiver);
- __ push(r0);
+ // Change context eagerly to get the right global object if necessary.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in r1.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ b(&entry);
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_receiver;
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, &push_receiver);
+
+ // Do not transform the receiver for strict mode functions.
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &push_receiver);
+
+ // Compute the receiver in non-strict mode.
+ __ JumpIfSmi(r0, &call_to_object);
+ __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ __ cmp(r0, r1);
+ __ b(eq, &use_global_receiver);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r1);
+ __ b(eq, &use_global_receiver);
+
+ // Check if the receiver is already a JavaScript object.
+ // r0: receiver
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &push_receiver);
+
+ // Convert the receiver to a regular object.
+ // r0: receiver
+ __ bind(&call_to_object);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ b(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ // r0: receiver
+ __ bind(&push_receiver);
+ __ push(r0);
- // Load the current argument from the arguments array and push it to the
- // stack.
- // r0: current argument index
- __ bind(&loop);
- __ ldr(r1, MemOperand(fp, kArgsOffset));
- __ push(r1);
- __ push(r0);
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ ldr(r0, MemOperand(fp, kIndexOffset));
+ __ b(&entry);
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(r0);
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // r0: current argument index
+ __ bind(&loop);
+ __ ldr(r1, MemOperand(fp, kArgsOffset));
+ __ push(r1);
+ __ push(r0);
- // Use inline caching to access the arguments.
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ add(r0, r0, Operand(1 << kSmiTagSize));
- __ str(r0, MemOperand(fp, kIndexOffset));
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(r0);
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ldr(r1, MemOperand(fp, kLimitOffset));
- __ cmp(r0, r1);
- __ b(ne, &loop);
+ // Use inline caching to access the arguments.
+ __ ldr(r0, MemOperand(fp, kIndexOffset));
+ __ add(r0, r0, Operand(1 << kSmiTagSize));
+ __ str(r0, MemOperand(fp, kIndexOffset));
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &call_proxy);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ ldr(r1, MemOperand(fp, kLimitOffset));
+ __ cmp(r0, r1);
+ __ b(ne, &loop);
+
+ // Invoke the function.
+ Label call_proxy;
+ ParameterCount actual(r0);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &call_proxy);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
- frame_scope.GenerateLeaveFrame();
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Jump(lr);
+ // Tear down the internal frame and remove function, receiver and args.
+ __ LeaveInternalFrame();
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Jump(lr);
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(r1); // add function proxy as last argument
- __ add(r0, r0, Operand(1));
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ // Invoke the function proxy.
+ __ bind(&call_proxy);
+ __ push(r1); // add function proxy as last argument
+ __ add(r0, r0, Operand(1));
+ __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ SetCallKind(r5, CALL_AS_METHOD);
+ __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
- // Tear down the internal frame and remove function, receiver and args.
- }
+ __ LeaveInternalFrame();
__ add(sp, sp, Operand(3 * kPointerSize));
__ Jump(lr);
}
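
Note: in the restored Generate_FunctionApply body the copy-loop index and
limit are kept on the stack as smis, so each iteration bumps the index by
1 << kSmiTagSize (raw value 2, i.e. one smi) and the count is untagged with an
arithmetic shift right before it becomes argc for the call. Sketch of just
that arithmetic, using the same instructions as the hunk:

    // index += 1, in smi form: (n << kSmiTagSize) + (1 << kSmiTagSize)
    __ add(r0, r0, Operand(1 << kSmiTagSize));
    // ...
    // untag before using the value as a machine integer (argc for the call)
    __ mov(r0, Operand(r0, ASR, kSmiTagSize));
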
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 44923a1843..e65f6d9b69 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -189,72 +189,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0, r1, r2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- // Setup the object header.
- __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the global context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(r3, &after_sentinel);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ cmp(r3, Operand::Zero());
- __ Assert(eq, message);
- }
- __ ldr(r3, GlobalObjectOperand());
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Setup the fixed slots.
- __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
- __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
- __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
-
- // Copy the global object from the previous context.
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@@ -904,11 +838,9 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
+ // Call C routine that may not cause GC or other trouble.
+ __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+ 0, 2);
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
@@ -925,29 +857,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
- return true;
- }
- if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
- WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
- WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
- stub1.GetCode()->set_is_pregenerated(true);
- stub2.GetCode()->set_is_pregenerated(true);
-}
-
-
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -1288,8 +1197,6 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
-
- AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
0, 2);
__ pop(pc); // Return.
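
The hunk above (like several others in this file) drops the AllowExternalCallThatCantCauseGC scope that the newer code wrapped around direct C calls; the contract it expresses is that while the scope is alive, anything that could trigger a GC is a bug. A minimal standalone sketch of that idea, with invented names (NoGCScope, AssertNoGC, call_c_compare) rather than V8's real API:

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-in for AllowExternalCallThatCantCauseGC: while an
    // instance is alive, any attempt to allocate (and so possibly collect)
    // is treated as a programming error.
    static int g_no_gc_depth = 0;

    struct NoGCScope {
      NoGCScope()  { ++g_no_gc_depth; }
      ~NoGCScope() { --g_no_gc_depth; }
    };

    void AssertNoGC() {  // would be called from a (model) allocation path
      assert(g_no_gc_depth == 0 && "GC not allowed inside external call");
    }

    // Model of CallCFunction(compare_doubles, ...): plain C arithmetic that
    // is guaranteed not to touch the managed heap.
    int call_c_compare(double a, double b) {
      NoGCScope scope;  // document and enforce the no-GC contract
      return (a > b) - (a < b);
    }

    int main() {
      std::printf("%d\n", call_c_compare(1.5, 2.5));  // prints -1
      return 0;
    }
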
@@ -1307,7 +1214,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
@@ -1699,8 +1606,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
@@ -1808,41 +1713,6 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
}
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ stm(db_w, sp, kCallerSaved | lr.bit());
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
- __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vstr(reg, MemOperand(sp, i * kDoubleSize));
- }
- }
- const int argument_count = 1;
- const int fp_argument_count = 0;
- const Register scratch = r1;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ mov(r0, Operand(ExternalReference::isolate_address()));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, MemOperand(sp, i * kDoubleSize));
- }
- __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
- }
- __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
-}
-
-
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
@@ -1996,13 +1866,12 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r1, Operand(r0));
- __ pop(r0);
- }
+ __ EnterInternalFrame();
+ __ push(r0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r1, Operand(r0));
+ __ pop(r0);
+ __ LeaveInternalFrame();
__ bind(&heapnumber_allocated);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@@ -2043,14 +1912,13 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r2, r0); // Move the new heap number into r2.
- // Get the heap number into r0, now that the new heap number is in r2.
- __ pop(r0);
- }
+ __ EnterInternalFrame();
+ __ push(r0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r2, r0); // Move the new heap number into r2.
+ // Get the heap number into r0, now that the new heap number is in r2.
+ __ pop(r0);
+ __ LeaveInternalFrame();
// Convert the heap number in r0 to an untagged integer in r1.
// This can't go slow-case because it's the same number we already
@@ -2160,10 +2028,6 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -3269,11 +3133,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
__ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
+ __ EnterInternalFrame();
+ __ push(r0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
__ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ret();
@@ -3286,15 +3149,14 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// We return the value in d2 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
+ __ EnterInternalFrame();
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ mov(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
__ Ret();
}
}
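
The hunks above repeatedly swap a FrameScope for explicit EnterInternalFrame()/LeaveInternalFrame() pairs around runtime calls. A toy model of the two styles, with MiniAssembler invented purely for illustration (it is not V8's MacroAssembler):

    #include <cstdio>

    // Invented stand-in, just enough to contrast the two styles.
    struct MiniAssembler {
      void EnterInternalFrame() { std::puts("enter frame"); }
      void LeaveInternalFrame() { std::puts("leave frame"); }
      void CallRuntime(const char* fn) { std::printf("call %s\n", fn); }
    };

    // RAII style (what the diff removes): the frame is torn down on every
    // path out of the scope, so Enter/Leave cannot get out of balance.
    struct FrameScope {
      explicit FrameScope(MiniAssembler* m) : masm(m) { masm->EnterInternalFrame(); }
      ~FrameScope() { masm->LeaveInternalFrame(); }
      MiniAssembler* masm;
    };

    void NewStyle(MiniAssembler* masm) {
      FrameScope scope(masm);
      masm->CallRuntime("Runtime::kNumberAlloc");
    }  // frame left automatically here

    // Explicit style (what the diff restores): the pair is written by hand.
    void OldStyle(MiniAssembler* masm) {
      masm->EnterInternalFrame();
      masm->CallRuntime("Runtime::kNumberAlloc");
      masm->LeaveInternalFrame();
    }

    int main() {
      MiniAssembler masm;
      NewStyle(&masm);
      OldStyle(&masm);
      return 0;
    }
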
@@ -3311,7 +3173,6 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
} else {
__ vmov(r0, r1, d2);
}
- AllowExternalCallThatCantCauseGC scope(masm);
switch (type_) {
case TranscendentalCache::SIN:
__ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@@ -3407,14 +3268,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()),
- 1, 1);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
- }
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()),
+ 1, 1);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@@ -3440,14 +3298,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
- }
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@@ -3464,37 +3319,6 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
-}
-
-
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
-}
-
-
-void CEntryStub::GenerateAheadOfTime() {
- CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
- code->set_is_pregenerated(true);
-}
-
-
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(r0);
}
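
The removed GenerateStubsAheadOfTime/GenerateFPStubs machinery above exists so that a fixed set of stubs is compiled eagerly and marked pregenerated, and later lookups can reuse them without compiling anything at an awkward time. A rough sketch of that generate-once-and-cache idea; the cache key and types here are invented, not V8's:

    #include <cstdio>
    #include <map>
    #include <string>

    // Hypothetical "code object"; in V8 this would be a Handle<Code>.
    struct Code {
      std::string desc;
      bool pregenerated;
    };

    static std::map<std::string, Code> g_stub_cache;

    // Model of stub.GetCode(): compile on first request, then reuse.
    Code& GetCode(const std::string& key) {
      auto it = g_stub_cache.find(key);
      if (it == g_stub_cache.end()) {
        it = g_stub_cache.emplace(key, Code{"code for " + key, false}).first;
      }
      return it->second;
    }

    // Model of GenerateStubsAheadOfTime(): populate the cache up front and
    // flag the entries so nothing is compiled later from inside a stub.
    void GenerateAheadOfTime() {
      for (const char* key : {"CEntry(no fp)", "WriteInt32ToHeapNumber(r1,r0,r2)"}) {
        GetCode(key).pregenerated = true;
      }
    }

    int main() {
      GenerateAheadOfTime();
      std::printf("%zu stubs pregenerated\n", g_stub_cache.size());
      return 0;
    }
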
@@ -3606,7 +3430,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ b(eq, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- __ mov(r3, Operand(isolate->factory()->the_hole_value()));
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
+ __ ldr(r3, MemOperand(ip));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
@@ -3644,7 +3469,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sub(r6, r6, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Setup argc and the builtin function in callee-saved registers.
@@ -3789,7 +3613,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ mov(r5, Operand(isolate->factory()->the_hole_value()));
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
+ __ ldr(r5, MemOperand(ip));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r5, MemOperand(ip));
@@ -4026,11 +3851,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(r0, r1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
+ __ EnterInternalFrame();
+ __ Push(r0, r1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ __ LeaveInternalFrame();
__ cmp(r0, Operand::Zero());
__ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@@ -4426,6 +4250,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
@@ -4652,7 +4480,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
- __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4703,7 +4532,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(isolate->factory()->the_hole_value()));
+ __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
+ __ ldr(r1, MemOperand(r1, 0));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(r2, 0));
@@ -4745,25 +4575,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
+ __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ mov(r2, subject);
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset,
- r2,
- r7,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastInputOffset,
- subject,
- r7,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ mov(r3, last_match_info_elements);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -4891,22 +4712,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void CallFunctionStub::FinishCode(Code* code) {
- code->set_has_function_cache(false);
-}
-
-
-void CallFunctionStub::Clear(Heap* heap, Address address) {
- UNREACHABLE();
-}
-
-
-Object* CallFunctionStub::GetCachedValue(Address address) {
- UNREACHABLE();
- return NULL;
-}
-
-
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow, non_function;
@@ -5084,26 +4889,23 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ cmp(result_, Operand(ip));
__ b(ne, &call_runtime_);
// Get the first of the two strings and load its instance type.
- __ ldr(result_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
__ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
__ add(scratch_, scratch_, result_);
- __ ldr(result_, FieldMemOperand(object_, SlicedString::kParentOffset));
+ __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Assure that we are dealing with a sequential string. Go to runtime if not.
__ bind(&assure_seq_string);
- __ ldr(result_, FieldMemOperand(result_, HeapObject::kMapOffset));
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// Check that parent is not an external string. Go to runtime otherwise.
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result_, Operand(kStringRepresentationMask));
__ b(ne, &call_runtime_);
- // Actually fetch the parent string if it is confirmed to be sequential.
- STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset);
- __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
@@ -6623,13 +6425,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op_)));
- __ push(ip);
- __ CallExternalReference(miss, 3);
- }
+ __ EnterInternalFrame();
+ __ Push(r1, r0);
+ __ mov(ip, Operand(Smi::FromInt(op_)));
+ __ push(ip);
+ __ CallExternalReference(miss, 3);
+ __ LeaveInternalFrame();
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -6812,8 +6613,6 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
// Registers:
// result: StringDictionary to probe
// r1: key
@@ -6903,267 +6702,6 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { r6, r4, r7, EMIT_REMEMBERED_SET },
- { r6, r2, r7, EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { r3, r4, r5, EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { r4, r1, r2, OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { r1, r2, r3, EMIT_REMEMBERED_SET },
- { r3, r2, r1, EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { r2, r1, r3, EMIT_REMEMBERED_SET },
- { r3, r1, r2, EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { r4, r2, r3, EMIT_REMEMBERED_SET },
- // Null termination.
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
-};
-
-
-bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-bool StoreBufferOverflowStub::IsPregenerated() {
- return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- }
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instructions (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
- __ b(&skip_to_incremental_noncompacting);
- __ b(&skip_to_incremental_compacting);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
- PatchBranchIntoNop(masm, 0);
- PatchBranchIntoNop(masm, Assembler::kInstrSize);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address =
- r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(r0));
- __ Move(address, regs_.address());
- __ Move(r0, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ Move(r1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ ldr(r1, MemOperand(address, 0));
- }
- __ mov(r2, Operand(ExternalReference::isolate_address()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label on_black;
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-
- // Get the value from the slot.
- __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
#undef __
} } // namespace v8::internal
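
The largest removal in this file is RecordWriteStub, the write barrier used by incremental marking: after a pointer has been stored into an object, the stub either just records the slot for the remembered set or, while marking is active, also informs the marker about the new value. A very small standalone model of that decision; the names and the single global mode flag are illustrative only, and the real stub first checks page flags and whether the value is in new space:

    #include <cstdio>
    #include <unordered_set>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
    static Mode g_mode = STORE_BUFFER_ONLY;

    static std::unordered_set<void**> g_remembered_set;    // slots to re-scan
    static std::unordered_set<void*>  g_marking_worklist;  // values to mark grey

    // Model of the barrier: the store has already happened; 'slot' is the
    // address that was written and '*slot' is the new value.
    void RecordWrite(void** slot) {
      if (g_mode != STORE_BUFFER_ONLY) {
        g_marking_worklist.insert(*slot);  // keep incremental marking consistent
      }
      g_remembered_set.insert(slot);       // old-to-new pointer bookkeeping
    }

    int main() {
      int object = 42;
      void* field = nullptr;  // pretend this is a pointer field in the heap
      field = &object;        // the store...
      RecordWrite(&field);    // ...followed by the barrier
      g_mode = INCREMENTAL;
      RecordWrite(&field);
      std::printf("%zu slots, %zu marked values\n",
                  g_remembered_set.size(), g_marking_worklist.size());
      return 0;
    }
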
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 3ba75bab13..557f7e6d41 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -58,25 +58,6 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -342,9 +323,6 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
- bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
-
private:
Register the_int_;
Register the_heap_number_;
@@ -393,225 +371,6 @@ class NumberToStringStub: public CodeStub {
};
-class RecordWriteStub: public CodeStub {
- public:
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
- ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
- }
-
- static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
- ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
- }
-
- static Mode GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- Assembler::kInstrSize);
-
- if (Assembler::IsBranch(first_instruction)) {
- return INCREMENTAL;
- }
-
- ASSERT(Assembler::IsTstImmediate(first_instruction));
-
- if (Assembler::IsBranch(second_instruction)) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(Assembler::IsTstImmediate(second_instruction));
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, Assembler::kInstrSize);
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize);
- break;
- }
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
- }
-
- private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_(object),
- address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
- }
-
- void Save(MacroAssembler* masm) {
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->push(scratch1_);
- }
-
- void Restore(MacroAssembler* masm) {
- masm->pop(scratch1_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
- masm->sub(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
- // Save all VFP registers except d0.
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP3);
- // Restore all VFP registers except d0.
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
- masm->add(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
- }
- masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
-
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
-
- bool MustBeInStubCache() {
- // All stubs must be registered in the stub cache
- // otherwise IncrementalMarker would not be able to find
- // and patch it.
- return true;
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 4> {};
- class ValueBits: public BitField<int, 4, 4> {};
- class AddressBits: public BitField<int, 8, 4> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- Label slow_;
- RegisterAllocation regs_;
-};
-
-
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
@@ -816,8 +575,6 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -830,7 +587,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryLookup; }
+ Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
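
The removed RecordWriteStub keys itself into the stub cache by packing register codes and flags into MinorKey() with BitField helpers. A self-contained sketch of that encoding scheme; the BitField here is a simplified re-implementation, the field layout mirrors the removed ObjectBits/ValueBits/AddressBits declarations, and the flag types are reduced to plain int:

    #include <cstdio>

    // Simplified version of a BitField<T, shift, size> helper.
    template <typename T, int shift, int size>
    struct BitField {
      static constexpr unsigned kMask = ((1u << size) - 1u) << shift;
      static unsigned encode(T value) { return static_cast<unsigned>(value) << shift; }
      static T decode(unsigned packed) { return static_cast<T>((packed & kMask) >> shift); }
    };

    // Field layout as in the removed stub: 4 bits per register code, then flags.
    using ObjectBits  = BitField<int, 0, 4>;
    using ValueBits   = BitField<int, 4, 4>;
    using AddressBits = BitField<int, 8, 4>;
    using RememberedSetActionBits = BitField<int, 12, 1>;
    using SaveFPRegsModeBits      = BitField<int, 13, 1>;

    int main() {
      // e.g. object = r6, value = r4, address = r7, emit remembered set, no FP save
      unsigned key = ObjectBits::encode(6) | ValueBits::encode(4) |
                     AddressBits::encode(7) | RememberedSetActionBits::encode(0) |
                     SaveFPRegsModeBits::encode(0);
      std::printf("key=0x%x object=r%d value=r%d address=r%d\n", key,
                  ObjectBits::decode(key), ValueBits::decode(key),
                  AddressBits::decode(key));
      return 0;
    }
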
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 3993ed02be..bf748a9b6a 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -38,16 +38,12 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
+ masm->EnterInternalFrame();
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
+ masm->LeaveInternalFrame();
}
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 1c0d508d2d..d27982abac 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -69,6 +69,16 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
+ // Constants related to patching of inlined load/store.
+ static int GetInlinedKeyedLoadInstructionsAfterPatch() {
+ return FLAG_debug_code ? 32 : 13;
+ }
+ static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
+ static int GetInlinedNamedStoreInstructionsAfterPatch() {
+ ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
+ return Isolate::Current()->inlined_write_barrier_size() + 4;
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index b866f9cc8d..07a22722c8 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -132,58 +132,56 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ tst(reg, Operand(0xc0000000));
- __ Assert(eq, "Unable to encode value as smi");
- }
- __ mov(reg, Operand(reg, LSL, kSmiTagSize));
+ __ EnterInternalFrame();
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non object values
+ // are stored as a smi causing it to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ tst(reg, Operand(0xc0000000));
+ __ Assert(eq, "Unable to encode value as smi");
}
+ __ mov(reg, Operand(reg, LSL, kSmiTagSize));
}
- __ stm(db_w, sp, object_regs | non_object_regs);
}
+ __ stm(db_w, sp, object_regs | non_object_regs);
+ }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
- __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ ldm(ia_w, sp, object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ mov(reg, Operand(reg, LSR, kSmiTagSize));
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ mov(reg, Operand(kDebugZapValue));
- }
+ __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
+ __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ ldm(ia_w, sp, object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+ }
+ if (FLAG_debug_code &&
+ (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+ __ mov(reg, Operand(kDebugZapValue));
}
}
-
- // Leave the internal frame.
}
+ __ LeaveInternalFrame();
+
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
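
Generate_DebugBreakCallHelper above keeps live register values safe across the runtime call by left-shifting non-object values by kSmiTagSize, so the GC sees them as harmless smis, and shifting them back (LSR) afterwards. A tiny standalone illustration of that round trip; kSmiTagSize is 1 as on 32-bit ARM, and the 0xc0000000 mask mirrors the debug-mode assert in the code above:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    constexpr int kSmiTagSize = 1;  // low bit 0 marks a small integer ("smi")

    // Only values with the top two bits clear survive the tag/untag round
    // trip, which is what the `tst reg, #0xc0000000` assert checks.
    bool FitsInSmi(uint32_t value) { return (value & 0xc0000000u) == 0; }

    uint32_t TagAsSmi(uint32_t value)  { return value << kSmiTagSize; }
    uint32_t UntagSmi(uint32_t tagged) { return tagged >> kSmiTagSize; }

    int main() {
      uint32_t raw = 12345;                 // e.g. an untagged index in a register
      assert(FitsInSmi(raw));
      uint32_t saved = TagAsSmi(raw);       // stored on the stack across the call;
                                            // the GC ignores it since bit 0 is 0
      uint32_t restored = UntagSmi(saved);  // restored after the debug-break call
      std::printf("%u -> %u -> %u\n", raw, saved, restored);
      return raw == restored ? 0 : 1;
    }
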
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index bb03d740d1..00357f76db 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -112,19 +112,12 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
#endif
- Isolate* isolate = code->GetIsolate();
-
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
+ DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -141,8 +134,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
@@ -177,13 +169,6 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
reinterpret_cast<uint32_t>(check_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
-
- RelocInfo rinfo(pc_after - 2 * kInstrSize,
- RelocInfo::CODE_TARGET,
- 0,
- unoptimized_code);
- unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- unoptimized_code, &rinfo, replacement_code);
}
@@ -208,9 +193,6 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
reinterpret_cast<uint32_t>(replacement_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->
- RecordCodeTargetPatch(pc_after - 2 * kInstrSize, check_code);
}
@@ -650,10 +632,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(r5, Operand(ExternalReference::isolate_address()));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
- }
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
@@ -707,11 +686,8 @@ void Deoptimizer::EntryGenerator::Generate() {
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1, r1);
// Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
- }
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 1);
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
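
DeoptimizeFunction above records the deoptimized code object by prepending a node to the isolate's deoptimizing_code_list_. The sketch below shows just that list insertion in isolation; Node, Code and DeoptimizerData are placeholders for DeoptimizingCodeListNode and the real per-isolate data, not V8 types:

    #include <cstdio>

    struct Code { int id; };  // placeholder for a code object

    struct Node {             // placeholder for DeoptimizingCodeListNode
      explicit Node(Code* c) : code(c), next(nullptr) {}
      Code* code;
      Node* next;
    };

    struct DeoptimizerData {  // placeholder for the per-isolate data
      Node* deoptimizing_code_list_ = nullptr;
    };

    // Mirrors the pattern in the hunk above: allocate a node and push it
    // onto the front of the list kept on the isolate.
    void AddDeoptimizingCode(DeoptimizerData* data, Code* code) {
      Node* node = new Node(code);
      node->next = data->deoptimizing_code_list_;
      data->deoptimizing_code_list_ = node;
    }

    int main() {
      DeoptimizerData data;
      Code a{1}, b{2};
      AddDeoptimizingCode(&data, &a);
      AddDeoptimizingCode(&data, &b);
      for (Node* n = data.deoptimizing_code_list_; n != nullptr; n = n->next)
        std::printf("code %d\n", n->code->id);  // prints 2 then 1 (LIFO)
      while (Node* n = data.deoptimizing_code_list_) {  // clean up
        data.deoptimizing_code_list_ = n->next;
        delete n;
      }
      return 0;
    }
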
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index c66ceee931..26bbd82d00 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -70,16 +70,6 @@ static const RegList kCalleeSaved =
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
-// When calling into C++ (only for C++ calls that can't cause a GC).
-// The call code will take care of lr, fp, etc.
-static const RegList kCallerSaved =
- 1 << 0 | // r0
- 1 << 1 | // r1
- 1 << 2 | // r2
- 1 << 3 | // r3
- 1 << 9; // r9
-
-
static const int kNumCalleeSaved = 7 + kR9Available;
// Double registers d8 to d15 are callee-saved.
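
The removed kCallerSaved above is a RegList, a plain bitmask with one bit per ARM register, and the stubs combine such masks with expressions like kCallerSaved | lr.bit(). A small standalone sketch of the convention (the Register type here is a stand-in, not V8's):

    #include <cstdio>

    typedef unsigned RegList;  // one bit per register, as in frames-arm.h

    struct Register {
      int code;
      RegList bit() const { return 1u << code; }
    };

    // Mirrors the removed constant: r0-r3 and r9 are caller-saved.
    const RegList kCallerSaved = 1u << 0 | 1u << 1 | 1u << 2 | 1u << 3 | 1u << 9;

    int CountRegisters(RegList list) {
      int n = 0;
      for (; list != 0; list &= list - 1) ++n;  // clear lowest set bit
      return n;
    }

    int main() {
      Register lr = {14};                         // link register, r14 on ARM
      RegList to_save = kCallerSaved | lr.bit();  // as in "stm(db_w, sp, ...)"
      std::printf("saving %d registers, mask=0x%x\n",
                  CountRegisters(to_save), to_save);
      return 0;
    }
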
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 353ce5b106..50ed8b1da7 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -39,7 +39,6 @@
#include "stub-cache.h"
#include "arm/code-stubs-arm.h"
-#include "arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
@@ -156,11 +155,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
@@ -206,12 +200,13 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
- __ str(r0, target);
-
- // Update the write barrier.
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ mov(r1, Operand(Context::SlotOffset(var->index())));
+ __ str(r0, MemOperand(cp, r1));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use two more registers to avoid
+ // clobbering cp.
+ __ mov(r2, Operand(cp));
+ __ RecordWrite(r2, Operand(r1), r3, r0);
}
}
}
@@ -269,7 +264,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), CONST, NULL, &ignored);
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -670,15 +665,12 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ str(src, location);
-
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
- __ RecordWriteContextSlot(scratch0,
- location.offset(),
- src,
- scratch1,
- kLRHasBeenSaved,
- kDontSaveFPRegs);
+ __ RecordWrite(scratch0,
+ Operand(Context::SlotOffset(var->index())),
+ scratch1,
+ src);
}
}
@@ -711,7 +703,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- VariableMode mode,
+ Variable::Mode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
@@ -729,7 +721,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ str(result_register(), StackOperand(variable));
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, StackOperand(variable));
@@ -754,16 +746,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ str(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(cp,
- offset,
- result_register(),
- r2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ mov(r1, Operand(cp));
+ __ RecordWrite(r1, Operand(offset), r2, result_register());
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, ContextOperand(cp, variable->index()));
@@ -776,8 +762,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == VAR || mode == CONST || mode == LET);
- PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE;
+ ASSERT(mode == Variable::VAR ||
+ mode == Variable::CONST ||
+ mode == Variable::LET);
+ PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -787,7 +775,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(function);
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ Push(cp, r2, r1, r0);
} else {
@@ -1217,23 +1205,15 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == DYNAMIC_GLOBAL) {
+ if (var->mode() == Variable::DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
+ } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == LET) {
+ if (local->mode() == Variable::CONST) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (local->mode() == CONST) {
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- } else { // LET
- __ b(ne, done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
__ jmp(done);
}
@@ -1266,13 +1246,13 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (var->mode() != LET && var->mode() != CONST) {
+ if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
context()->Plug(var);
} else {
// Let and const need a read barrier.
GetVar(r0, var);
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET) {
+ if (var->mode() == Variable::LET) {
Label done;
__ b(ne, &done);
__ mov(r0, Operand(var->name()));
@@ -1510,23 +1490,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
- __ ldr(r6, MemOperand(sp)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
+ __ ldr(r1, MemOperand(sp)); // Copy of array literal.
+ __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ str(result_register(), FieldMemOperand(r1, offset));
- Label no_map_change;
- __ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store with r0 as the scratch
// register.
- __ RecordWriteField(
- r1, offset, result_register(), r2, kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ CheckFastSmiOnlyElements(r3, r2, &no_map_change);
- __ push(r6); // Copy of array literal.
- __ CallRuntime(Runtime::kNonSmiElementStored, 1);
- __ bind(&no_map_change);
+ __ RecordWrite(r1, Operand(offset), r2, result_register());
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1873,7 +1844,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(r0); // Value.
@@ -1898,12 +1869,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(r1, Operand(offset), r2, r3);
}
}
- } else if (var->mode() != CONST) {
+ } else if (var->mode() != Variable::CONST) {
// Assignment to var or initializing assignment to let.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, r1);
@@ -1917,9 +1887,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ str(r0, location);
if (var->IsContextSlot()) {
__ mov(r3, r0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
}
} else {
ASSERT(var->IsLookupSlot());
@@ -2139,8 +2107,10 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
__ push(r1);
// Push the strict mode flag. In harmony mode every eval call
// is a strict mode eval call.
- StrictModeFlag strict_mode =
- FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
+ StrictModeFlag strict_mode = strict_mode_flag();
+ if (FLAG_harmony_block_scoping) {
+ strict_mode = kStrictMode;
+ }
__ mov(r1, Operand(Smi::FromInt(strict_mode)));
__ push(r1);
@@ -2186,7 +2156,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// context lookup in the runtime system.
Label done;
Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
+ if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
Label slow;
EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
// Push the function and resolve eval.
@@ -2692,24 +2662,20 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
// Map is now in r0.
__ b(lt, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ b(eq, &function);
-
- __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ b(eq, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
+
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+ __ b(ge, &function);
+
+ // Check if the constructor in the map is a function.
__ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
__ b(ne, &non_function_constructor);
@@ -2887,9 +2853,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ mov(r2, r0);
- __ RecordWriteField(
- r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
__ bind(&done);
context()->Plug(r0);
@@ -3177,31 +3141,16 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ str(scratch1, MemOperand(index2, 0));
__ str(scratch2, MemOperand(index1, 0));
- Label no_remembered_set;
- __ CheckPageFlag(elements,
- scratch1,
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &no_remembered_set);
+ Label new_space;
+ __ InNewSpace(elements, scratch1, eq, &new_space);
// Possible optimization: do a check that both values are Smis
// (or them and test against Smi mask.)
- // We are swapping two objects in an array and the incremental marker never
- // pauses in the middle of scanning a single object. Therefore the
- // incremental marker is not disturbed, so we don't need to call the
- // RecordWrite stub that notifies the incremental marker.
- __ RememberedSetHelper(elements,
- index1,
- scratch2,
- kDontSaveFPRegs,
- MacroAssembler::kFallThroughAtEnd);
- __ RememberedSetHelper(elements,
- index2,
- scratch2,
- kDontSaveFPRegs,
- MacroAssembler::kFallThroughAtEnd);
+ __ mov(scratch1, elements);
+ __ RecordWriteHelper(elements, index1, scratch2);
+ __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
- __ bind(&no_remembered_set);
+ __ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
__ Drop(3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -3949,14 +3898,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
VisitForTypeofValue(expr);
}
@@ -3997,11 +3942,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(r0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
- __ b(eq, if_true);
- __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
- Split(eq, if_true, if_false, fall_through);
+ __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ Split(ge, if_true, if_false, fall_through);
+
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(r0, if_false);
if (!FLAG_harmony_typeof) {
@@ -4020,7 +3963,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
- context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
@@ -4028,12 +3982,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
+
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4041,6 +3992,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+ context()->Plug(if_true, if_false);
+ return;
+ }
+
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
@@ -4127,9 +4085,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Comment cmnt(masm_, "[ CompareToNull");
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4137,21 +4094,15 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(sub_expr);
+ VisitForAccumulatorValue(expr->expression());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(r1, nil_value);
+ __ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
- if (expr->op() == Token::EQ_STRICT) {
+ if (expr->is_strict()) {
Split(eq, if_true, if_false, fall_through);
} else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
__ b(eq, if_true);
- __ LoadRoot(r1, other_nil_value);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r1);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);
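
A reading aid for the restored VisitCompareToNull above (illustrative C++ only, not part of the patch): the emitted code accepts null outright, and in the non-strict case also accepts undefined and rejects smis before the remaining object checks. The NilKind/Value types below are hypothetical stand-ins for V8's tagged values, and the final undetectable-object test is assumed from the portion of the hunk not shown here.

    // Hypothetical stand-ins for tagged values; the generated ARM code works
    // on heap roots, smi tag bits and map bits instead.
    enum class NilKind { kNull, kUndefined, kSmi, kUndetectableObject, kOther };

    struct Value { NilKind kind; };

    // Decision logic mirrored by the restored VisitCompareToNull sequence.
    bool CompareToNull(Value v, bool is_strict) {
      if (v.kind == NilKind::kNull) return true;        // cmp against kNullValueRootIndex
      if (is_strict) return false;                      // strict: only null matches
      if (v.kind == NilKind::kUndefined) return true;   // cmp against kUndefinedValueRootIndex
      if (v.kind == NilKind::kSmi) return false;        // JumpIfSmi(r0, if_false)
      return v.kind == NilKind::kUndetectableObject;    // assumed undetectable-map check (tail of hunk not shown)
    }
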
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 6e0badca1d..2e49cae928 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -208,8 +208,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(elements, scratch2, scratch1);
}
@@ -505,22 +504,21 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack.
__ ldr(r3, MemOperand(sp, argc * kPointerSize));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Push the receiver and the name of the function.
- __ Push(r3, r2);
+ // Push the receiver and the name of the function.
+ __ Push(r3, r2);
- // Call the entry.
- __ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
+ // Call the entry.
+ __ mov(r0, Operand(2));
+ __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
- CEntryStub stub(1);
- __ CallStub(&stub);
+ CEntryStub stub(1);
+ __ CallStub(&stub);
- // Move result to r1 and leave the internal frame.
- __ mov(r1, Operand(r0));
- }
+ // Move result to r1 and leave the internal frame.
+ __ mov(r1, Operand(r0));
+ __ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@@ -652,13 +650,12 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r2); // save the key
- __ Push(r1, r2); // pass the receiver and the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(r2); // restore the key
- }
+ __ EnterInternalFrame();
+ __ push(r2); // save the key
+ __ Push(r1, r2); // pass the receiver and the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(r2); // restore the key
+ __ LeaveInternalFrame();
__ mov(r1, r0);
__ jmp(&do_call);
@@ -911,8 +908,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
__ str(r0, mapped_location);
__ add(r6, r3, r5);
- __ mov(r9, r0);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(r3, r6, r9);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in r3.
@@ -920,8 +916,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
__ str(r0, unmapped_location);
__ add(r6, r3, r4);
- __ mov(r9, r0);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(r3, r6, r9);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@@ -1272,17 +1267,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
- Label slow, array, extra, check_if_double_array;
- Label fast_object_with_map_check, fast_object_without_map_check;
- Label fast_double_with_map_check, fast_double_without_map_check;
+ Label slow, fast, array, extra;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
Register elements = r3; // Elements array of the receiver.
- Register elements_map = r6;
- Register receiver_map = r7;
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@@ -1290,26 +1281,35 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
- __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check if the object is a JS array or not.
- __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
// Check that the object is some kind of JSObject.
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(lt, &slow);
+ __ cmp(r4, Operand(JS_PROXY_TYPE));
+ __ b(eq, &slow);
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(eq, &slow);
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check that the object is in fast mode and writable.
+ __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r4, ip);
+ __ b(ne, &slow);
// Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
- __ b(lo, &fast_object_with_map_check);
+ __ b(lo, &fast);
// Slow case, handle jump to runtime.
__ bind(&slow);
@@ -1330,31 +1330,21 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ b(hs, &slow);
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, &check_if_double_array);
// Calculate key + 1 as smi.
STATIC_ASSERT(kSmiTag == 0);
__ add(r4, key, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ b(&fast_object_without_map_check);
-
- __ bind(&check_if_double_array);
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ b(ne, &slow);
- // Add 1 to key, and go to common element store code for doubles.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r4, key, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ jmp(&fast_double_without_map_check);
+ __ b(&fast);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
// is the length is always a smi.
__ bind(&array);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r4, ip);
+ __ b(ne, &slow);
// Check the key against the length in the array.
__ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1362,57 +1352,18 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ b(hs, &extra);
// Fall through to fast case.
- __ bind(&fast_object_with_map_check);
- Register scratch_value = r4;
- Register address = r5;
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, &fast_double_with_map_check);
- __ bind(&fast_object_without_map_check);
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
- __ Ret();
-
- __ bind(&non_smi_value);
- // Escape to slow case when writing non-smi into smi-only array.
- __ CheckFastObjectElements(receiver_map, scratch_value, &slow);
- // Fast elements array, store the value to the elements backing store.
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
+ __ bind(&fast);
+ // Fast case, store the value to the elements backing store.
+ __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value, MemOperand(r5));
+ // Skip write barrier if the written value is a smi.
+ __ tst(value, Operand(kSmiTagMask));
+ __ Ret(eq);
// Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- scratch_value,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
+ __ sub(r4, r5, Operand(elements));
+ __ RecordWrite(elements, Operand(r4), r5, r6);
- __ bind(&fast_double_with_map_check);
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ b(ne, &slow);
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- receiver,
- elements,
- r4,
- r5,
- r6,
- r7,
- &slow);
__ Ret();
}
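
For orientation, the restored fast path at the end of KeyedStoreIC::GenerateGeneric stores the value, returns immediately when it is a smi, and otherwise records the write using the slot's byte offset from the elements array. A minimal C++ sketch of that control flow, with hypothetical helpers standing in for the MacroAssembler calls:

    #include <cstdint>

    // Hypothetical stand-in for the region-marking barrier invoked above via
    // __ RecordWrite(elements, Operand(r4), r5, r6); the real helper appears
    // in the macro-assembler-arm.cc hunks further down.
    void RecordWriteRegion(void* /*object*/, void* /*slot*/) {}

    // One low tag bit with kSmiTag == 0, as the tst against kSmiTagMask assumes.
    inline bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }

    // Control flow of the restored fast path: store, return early for smis,
    // otherwise record the write for the modified slot.
    void KeyedStoreFastPath(uintptr_t* elements, int index, uintptr_t value) {
      uintptr_t* slot = elements + index;   // add(r5, elements, header + scaled key)
      *slot = value;                        // str(value, MemOperand(r5))
      if (IsSmi(value)) return;             // tst(value, kSmiTagMask); Ret(eq)
      RecordWriteRegion(elements, slot);    // sub(r4, r5, elements); RecordWrite(...)
    }

The smi short-circuit is what lets the downgraded stub drop the separate smi-only-elements handling: a stored smi never needs a barrier, and everything else takes the region-marking path.
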
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 84959397b6..30ccd05bee 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -212,11 +212,10 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
+ stream->Add(is_strict() ? " === null" : " == null");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@@ -712,9 +711,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ instr->set_environment(CreateEnvironment(hydrogen_env));
return instr;
}
@@ -997,13 +994,10 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1013,6 +1007,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
argument_count_,
value_count,
outer);
+ int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1021,7 +1016,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument((*argument_index_accumulator)++);
+ op = new LArgument(argument_index++);
} else {
op = UseAny(value);
}
@@ -1449,9 +1444,9 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
+ return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
}
@@ -1739,7 +1734,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->RequiresHoleCheck()
+ return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@@ -1753,11 +1748,14 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* temp = TempRegister();
- LOperand* value = UseTempRegister(instr->value());
- LInstruction* result = new LStoreGlobalCell(value, temp);
- if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
- return result;
+ if (instr->check_hole_value()) {
+ LOperand* temp = TempRegister();
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(new LStoreGlobalCell(value, temp));
+ } else {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new LStoreGlobalCell(value, NULL);
+ }
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 73c7e459c3..8c18760fd1 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -107,7 +107,7 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
+ V(IsNullAndBranch) \
V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -627,17 +627,16 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
+class LIsNullAndBranch: public LControlInstruction<1, 0> {
public:
- explicit LIsNilAndBranch(LOperand* value) {
+ explicit LIsNullAndBranch(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
+ bool is_strict() const { return hydrogen()->is_strict(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -2160,8 +2159,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
void VisitInstruction(HInstruction* current);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 70ef884816..f5d7449149 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -82,14 +82,6 @@ bool LCodeGen::GenerateCode() {
status_ = GENERATING;
CpuFeatures::Scope scope1(VFP3);
CpuFeatures::Scope scope2(ARMv7);
-
- CodeStub::GenerateFPStubs();
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // NONE indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::NONE);
-
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -214,11 +206,13 @@ bool LCodeGen::GeneratePrologue() {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
- __ str(r0, target);
- // Update the write barrier. This clobbers r3 and r0.
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
+ __ mov(r1, Operand(Context::SlotOffset(var->index())));
+ __ str(r0, MemOperand(cp, r1));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use two more registers to avoid
+ // clobbering cp.
+ __ mov(r2, Operand(cp));
+ __ RecordWrite(r2, Operand(r1), r3, r0);
}
}
Comment(";;; End allocate local context");
@@ -268,9 +262,6 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
- Comment(";;; Deferred code @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
@@ -748,7 +739,7 @@ void LCodeGen::RecordSafepoint(
int deoptimization_index) {
ASSERT(expected_safepoint_kind_ == kind);
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
@@ -1041,7 +1032,6 @@ void LCodeGen::DoDivI(LDivI* instr) {
virtual void Generate() {
codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
}
- virtual LInstruction* instr() { return instr_; }
private:
LDivI* instr_;
};
@@ -1753,35 +1743,25 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register scratch = scratch0();
Register reg = ToRegister(instr->InputAt(0));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be a an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
+ // TODO(fsc): If the expression is known to be a smi, then it's
+ // definitely not null. Jump to the false block.
int true_block = chunk_->LookupDestination(instr->true_block_id());
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(ip, nil_value);
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(reg, ip);
- if (instr->kind() == kStrictEquality) {
+ if (instr->is_strict()) {
EmitBranch(true_block, false_block, eq);
} else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ b(eq, true_label);
- __ LoadRoot(ip, other_nil_value);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(reg, ip);
__ b(eq, true_label);
__ JumpIfSmi(reg, false_label);
@@ -1938,36 +1918,28 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false);
+ __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
+ __ b(lt, is_false);
+ // Map is now in temp.
+ // Functions have class 'Function'.
+ __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, is_false);
- __ b(eq, is_true);
- __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
- __ b(eq, is_true);
+ __ b(ge, is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
- __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(gt, is_false);
+ __ b(ge, is_false);
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -2044,8 +2016,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
virtual void Generate() {
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+
Label* map_check() { return &map_check_; }
+
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@@ -2207,7 +2180,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
__ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (instr->hydrogen()->check_hole_value()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
DeoptimizeIf(eq, instr->environment());
@@ -2230,7 +2203,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
// Load the cell.
__ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
@@ -2239,7 +2211,8 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted.
- if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (instr->hydrogen()->check_hole_value()) {
+ Register scratch2 = ToRegister(instr->TempAt(0));
__ ldr(scratch2,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -2249,15 +2222,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
-
- // Cells are always in the remembered set.
- __ RecordWriteField(scratch,
- JSGlobalPropertyCell::kValueOffset,
- value,
- scratch2,
- kLRHasBeenSaved,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET);
}
@@ -2283,15 +2247,10 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- MemOperand target = ContextOperand(context, instr->slot_index());
- __ str(value, target);
+ __ str(value, ContextOperand(context, instr->slot_index()));
if (instr->needs_write_barrier()) {
- __ RecordWriteContextSlot(context,
- target.offset(),
- value,
- scratch0(),
- kLRHasBeenSaved,
- kSaveFPRegs);
+ int offset = Context::SlotOffset(instr->slot_index());
+ __ RecordWrite(context, Operand(offset), value, scratch0());
}
}
@@ -2541,9 +2500,13 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ // TODO(danno): If no hole check is required, there is no need to allocate
+ // elements into a temporary register, instead scratch can be used.
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
+ }
__ vldr(result, elements, 0);
}
@@ -2614,7 +2577,6 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -2944,7 +2906,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@@ -3241,7 +3202,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+ CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3301,8 +3262,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ str(value, FieldMemOperand(object, offset));
if (instr->needs_write_barrier()) {
// Update the write barrier for the object for in-object properties.
- __ RecordWriteField(
- object, offset, value, scratch, kLRHasBeenSaved, kSaveFPRegs);
+ __ RecordWrite(object, Operand(offset), value, scratch);
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
@@ -3310,8 +3270,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
- __ RecordWriteField(
- scratch, offset, value, object, kLRHasBeenSaved, kSaveFPRegs);
+ __ RecordWrite(scratch, Operand(offset), value, object);
}
}
}
@@ -3342,13 +3301,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
- // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- // conversion, so it deopts in that case.
- if (instr->hydrogen()->ValueNeedsSmiCheck()) {
- __ tst(value, Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment());
- }
-
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3363,8 +3315,8 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Compute address of modified element and store it into key register.
- __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ RecordWrite(elements, key, value, kLRHasBeenSaved, kSaveFPRegs);
+ __ add(key, scratch, Operand(FixedArray::kHeaderSize));
+ __ RecordWrite(elements, key, value);
}
}
@@ -3465,7 +3417,6 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3501,7 +3452,6 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -3625,7 +3575,6 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -3697,7 +3646,6 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -3763,7 +3711,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -3872,6 +3819,16 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+ LTaggedToI* instr_;
+};
+
+
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(instr->InputAt(0));
Register scratch1 = scratch0();
@@ -3954,16 +3911,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -4396,12 +4343,10 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = ne;
} else if (type_name->Equals(heap()->function_symbol())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
- __ b(eq, true_label);
- __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
- final_branch_condition = eq;
+ __ CompareObjectType(input, input, scratch,
+ FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ final_branch_condition = ge;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@@ -4523,7 +4468,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
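
Several hunks above (here and in the earlier full-codegen change) replace the two compares against JS_FUNCTION_TYPE and JS_FUNCTION_PROXY_TYPE with a single range test, relying on the instance-type ordering spelled out by the STATIC_ASSERTs in EmitClassOfTest. A minimal sketch of that test in plain C++, with placeholder numeric values (only the ordering is taken from the asserts):

    // Placeholder instance-type values; only the ordering matters, and it
    // mirrors the STATIC_ASSERTs above: callable spec objects end the range.
    enum InstanceType {
      FIRST_SPEC_OBJECT_TYPE = 0xa0,
      LAST_NONCALLABLE_SPEC_OBJECT_TYPE = 0xad,
      FIRST_CALLABLE_SPEC_OBJECT_TYPE = LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1,
      LAST_CALLABLE_SPEC_OBJECT_TYPE = 0xaf,
      LAST_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE
    };

    // One range test replaces the former JS_FUNCTION_TYPE /
    // JS_FUNCTION_PROXY_TYPE pair of compares: typeof reports "function"
    // exactly for the callable range.
    inline bool TypeofIsFunction(InstanceType type) {
      return type >= FIRST_CALLABLE_SPEC_OBJECT_TYPE;   // CompareObjectType + Split(ge, ...)
    }
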
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 711e4595e7..ead8489034 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -376,20 +376,16 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
+ : codegen_(codegen), external_exit_(NULL) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -400,7 +396,6 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
- int instruction_index_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 918f9ebe06..f37f310218 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -42,8 +42,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- has_frame_(false) {
+ allow_stub_calls_(true) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@@ -407,6 +406,32 @@ void MacroAssembler::StoreRoot(Register source,
}
+void MacroAssembler::RecordWriteHelper(Register object,
+ Register address,
+ Register scratch) {
+ if (emit_debug_code()) {
+ // Check that the object is not in new space.
+ Label not_in_new_space;
+ InNewSpace(object, scratch, ne, &not_in_new_space);
+ Abort("new-space object passed to RecordWriteHelper");
+ bind(&not_in_new_space);
+ }
+
+ // Calculate page address.
+ Bfc(object, 0, kPageSizeBits);
+
+ // Calculate region number.
+ Ubfx(address, address, Page::kRegionSizeLog2,
+ kPageSizeBits - Page::kRegionSizeLog2);
+
+ // Mark region dirty.
+ ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ mov(ip, Operand(1));
+ orr(scratch, scratch, Operand(ip, LSL, address));
+ str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+}
+
+
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@@ -418,52 +443,38 @@ void MacroAssembler::InNewSpace(Register object,
}
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
+// Will clobber 4 registers: object, offset, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done);
- }
+ Label done;
- // Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch0, eq, &done);
- add(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
- Label ok;
- tst(dst, Operand((1 << kPointerSizeLog2) - 1));
- b(eq, &ok);
- stop("Unaligned cell in write barrier");
- bind(&ok);
- }
+ // Add offset into the object.
+ add(scratch0, object, offset);
- RecordWrite(object,
- dst,
- value,
- lr_status,
- save_fp,
- remembered_set_action,
- OMIT_SMI_CHECK);
+ // Record the actual write.
+ RecordWriteHelper(object, scratch0, scratch1);
bind(&done);
- // Clobber clobbered input registers when running with the debug-code flag
+ // Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
- mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+ mov(object, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
}
}
@@ -473,94 +484,29 @@ void MacroAssembler::RecordWriteField(
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ Register scratch) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
+ ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- tst(value, Operand(kSmiTagMask));
- b(eq, &done);
- }
-
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- eq,
- &done);
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch, eq, &done);
// Record the actual write.
- if (lr_status == kLRHasNotBeenSaved) {
- push(lr);
- }
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
- if (lr_status == kLRHasNotBeenSaved) {
- pop(lr);
- }
+ RecordWriteHelper(object, address, scratch);
bind(&done);
- // Clobber clobbered registers when running with the debug-code flag
+ // Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
- mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
- }
-}
-
-
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address,
- Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
- Label done;
- if (FLAG_debug_code) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok);
- stop("Remembered set pointer is in new space");
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(ip, Operand(store_buffer));
- ldr(scratch, MemOperand(ip));
- // Store pointer to buffer and increment buffer top.
- str(address, MemOperand(scratch, kPointerSize, PostIndex));
- // Write back new top of buffer.
- str(scratch, MemOperand(ip));
- // Call stub on end of buffer.
- // Check for end of buffer.
- tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
- if (and_then == kFallThroughAtEnd) {
- b(eq, &done);
- } else {
- ASSERT(and_then == kReturnAtEnd);
- Ret(eq);
- }
- push(lr);
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(fp_mode);
- CallStub(&store_buffer_overflow);
- pop(lr);
- bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
+ mov(object, Operand(BitCast<int32_t>(kZapValue)));
+ mov(address, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
}
}
@@ -1015,9 +961,6 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
@@ -1045,9 +988,6 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
RelocInfo::Mode rmode,
InvokeFlag flag,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
Label done;
InvokePrologue(expected, actual, code, no_reg, &done, flag,
@@ -1071,9 +1011,6 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
@@ -1098,9 +1035,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
ASSERT(function->is_compiled());
// Get the function and setup the context.
@@ -1156,10 +1090,10 @@ void MacroAssembler::IsObjectJSStringType(Register object,
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
+ ASSERT(allow_stub_calls());
mov(r0, Operand(0, RelocInfo::NONE));
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif
@@ -1859,127 +1793,13 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 0);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
- ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
- b(ls, fail);
- cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
- b(hi, fail);
-}
-
-
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
- b(hi, fail);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register receiver_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail) {
- Label smi_value, maybe_nan, have_double_value, is_nan, done;
- Register mantissa_reg = scratch2;
- Register exponent_reg = scratch3;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- CheckMap(value_reg,
- scratch1,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
- ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- cmp(exponent_reg, scratch1);
- b(ge, &maybe_nan);
-
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- bind(&have_double_value);
- add(scratch1, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- str(exponent_reg, FieldMemOperand(scratch1, offset));
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- b(gt, &is_nan);
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- cmp(mantissa_reg, Operand(0));
- b(eq, &have_double_value);
- bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- jmp(&have_double_value);
-
- bind(&smi_value);
- add(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- add(scratch1, scratch1,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- // scratch1 is now effective address of the double element
-
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = receiver_reg;
- SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- d0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- s2);
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
- vstr(d0, scratch1, 0);
- } else {
- str(mantissa_reg, MemOperand(scratch1, 0));
- str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
- bind(&done);
-}
-
-
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -2075,13 +1895,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
}
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2093,12 +1913,13 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2201,12 +2022,6 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
}
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
add(sp, sp, Operand(num_arguments * kPointerSize));
@@ -2602,7 +2417,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1, kSaveFPRegs);
+ CEntryStub stub(1);
+ stub.SaveDoubles();
CallStub(&stub);
}
@@ -2675,9 +2491,6 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
GetBuiltinEntry(r2, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2));
@@ -2809,20 +2622,14 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ AllowStubCallsScope allow_scope(this, true);
mov(r0, Operand(p0));
push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0)));
push(r0);
- // Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
- } else {
- CallRuntime(Runtime::kAbort, 2);
- }
+ CallRuntime(Runtime::kAbort, 2);
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
@@ -3123,19 +2930,6 @@ void MacroAssembler::CopyBytes(Register src,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
- b(&entry);
- bind(&loop);
- str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
- bind(&entry);
- cmp(start_offset, end_offset);
- b(lt, &loop);
-}
-
-
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
Register source, // Input.
Register scratch) {
@@ -3295,15 +3089,23 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- mov(ip, Operand(function));
- CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(no_reg,
+ function,
+ ip,
+ num_reg_arguments,
+ num_double_arguments);
}
void MacroAssembler::CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+ Register scratch,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function,
+ ExternalReference::the_hole_value_location(isolate()),
+ scratch,
+ num_reg_arguments,
+ num_double_arguments);
}
@@ -3314,15 +3116,17 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function,
+ Register scratch,
int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+ CallCFunction(function, scratch, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
int num_reg_arguments,
int num_double_arguments) {
- ASSERT(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
@@ -3346,6 +3150,10 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
+ if (function.is(no_reg)) {
+ mov(scratch, Operand(function_reference));
+ function = scratch;
+ }
Call(function);
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
@@ -3377,185 +3185,6 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
}
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met) {
- and_(scratch, object, Operand(~Page::kPageAlignmentMask));
- ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
- tst(scratch, Operand(mask));
- b(cc, condition_met);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- int first_bit,
- int second_bit) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- tst(ip, Operand(mask_scratch));
- b(first_bit == 1 ? eq : ne, &other_color);
- // Shift left 1 by adding.
- add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
- b(eq, &word_boundary);
- tst(ip, Operand(mask_scratch));
- b(second_bit == 1 ? ne : eq, has_color);
- jmp(&other_color);
-
- bind(&word_boundary);
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
- tst(ip, Operand(1));
- b(second_bit == 1 ? ne : eq, has_color);
- bind(&other_color);
-}
-
-
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- Label is_data_object;
- ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- b(eq, &is_data_object);
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- b(ne, not_data_object);
- bind(&is_data_object);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
- and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
- Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
- const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
- mov(ip, Operand(1));
- mov(mask_reg, Operand(ip, LSL, mask_reg));
-}
-
-
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label done;
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- tst(mask_scratch, load_scratch);
- b(ne, &done);
-
- if (FLAG_debug_code) {
- // Check for impossible bit pattern.
- Label ok;
- // LSL may overflow, making the check conservative.
- tst(load_scratch, Operand(mask_scratch, LSL, 1));
- b(eq, &ok);
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
- b(eq, &is_data_object);
-
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- b(ne, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- tst(instance_type, Operand(kExternalStringTag));
- mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
- b(ne, &is_data_object);
-
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- ldr(ip, FieldMemOperand(value, String::kLengthOffset));
- tst(instance_type, Operand(kStringEncodingMask));
- mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
- add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- orr(ip, ip, Operand(mask_scratch));
- str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- add(ip, ip, Operand(length));
- str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
-}
-
-
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
Usat(output_reg, 8, Operand(input_reg));
}
@@ -3605,17 +3234,6 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
-}
-
-
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 8ee468a917..6084fde2d3 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -29,7 +29,6 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h"
-#include "frames.h"
#include "v8globals.h"
namespace v8 {
@@ -80,14 +79,6 @@ enum ObjectToDoubleFlags {
};
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
-
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
-
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -166,126 +157,40 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
- // ---------------------------------------------------------------------------
- // GC Support
-
- void IncrementalMarkingRecordWriteHelper(Register object,
- Register value,
- Register address);
-
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, ne, branch);
- }
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
+ // Check if object is in new space.
+ // scratch can be object itself, but it will be clobbered.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cond, // eq for new space, ne otherwise
+ Label* branch);
- // Check if an object has a given incremental marking color.
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- int first_bit,
- int second_bit);
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* object_is_white_and_not_data);
+ // For the page containing |object| mark the region covering [address]
+ // dirty. The object address must be in the first 8K of an allocated page.
+ void RecordWriteHelper(Register object,
+ Register address,
+ Register scratch);
- // Detects conservatively whether an object is data-only, i.e. it does not
- // need to be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- lr_status,
- save_fp,
- remembered_set_action,
- smi_check);
- }
+ // For the page containing |object| mark the region covering
+ // [object+offset] dirty. The object address must be in the first 8K
+ // of an allocated page. The 'scratch' registers are used in the
+ // implementation and all 3 registers are clobbered by the
+ // operation, as well as the ip register. RecordWrite updates the
+ // write barrier even when storing smis.
+ void RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
- // For a given |object| notify the garbage collector that the slot |address|
- // has been written. |value| is the object being stored. The value and
- // address registers are clobbered by the operation.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ // For the page containing |object| mark the region covering
+ // [address] dirty. The object address must be in the first 8K of an
+ // allocated page. All 3 registers are clobbered by the operation,
+ // as well as the ip register. RecordWrite updates the write barrier
+ // even when storing smis.
+ void RecordWrite(Register object,
+ Register address,
+ Register scratch);
// Push a handle.
void Push(Handle<Object> handle);
@@ -413,6 +318,16 @@ class MacroAssembler: public Assembler {
const double imm,
const Condition cond = al);
+
+ // ---------------------------------------------------------------------------
+ // Activation frames
+
+ void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+ void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+ void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+ void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -654,13 +569,6 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end of the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -700,31 +608,6 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiOnlyElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
- void StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register receiver_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail);
-
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@@ -947,11 +830,11 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(Register function, Register scratch, int num_arguments);
void CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments);
- void CallCFunction(Register function,
+ void CallCFunction(Register function, Register scratch,
int num_reg_arguments,
int num_double_arguments);
@@ -1019,9 +902,6 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
@@ -1168,12 +1048,10 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
private:
void CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
int num_reg_arguments,
int num_double_arguments);
@@ -1189,25 +1067,16 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise.
- Label* branch);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // register holds the position of the first bit. Leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
@@ -1215,7 +1084,6 @@ class MacroAssembler: public Assembler {
bool generating_stub_;
bool allow_stub_calls_;
- bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index c876467938..cd76edbf15 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -371,12 +371,9 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ mov(r3, Operand(ExternalReference::isolate_address()));
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(function, argument_count);
- }
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(function, argument_count);
// Check if function returned non-zero for success or zero for failure.
__ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -614,12 +611,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
// Push arguments
// Save callee-save registers.
// Start new stack frame.
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 5704202622..6af535553f 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1618,8 +1618,6 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
- // Catch null pointers a little earlier.
- ASSERT(start_address > 8191 || start_address < 0);
int reg = 0;
while (rlist != 0) {
if ((rlist & 1) != 0) {
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 4558afe68a..f8565924b1 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -431,13 +431,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, r0);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -450,13 +444,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, r0);
- __ RecordWriteField(scratch,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
}
// Return the value (register r0).
@@ -565,10 +553,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
}
-static MaybeObject* GenerateFastApiDirectCall(
- MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee js function
@@ -604,8 +591,6 @@ static MaybeObject* GenerateFastApiDirectCall(
ApiFunction fun(api_function_address);
const int kApiStackSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// r0 = v8::Arguments&
@@ -631,11 +616,9 @@ static MaybeObject* GenerateFastApiDirectCall(
ExternalReference ref = ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
masm->isolate());
- AllowExternalCallThatCantCauseGC scope(masm);
return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
-
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -811,7 +794,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
miss_label);
// Call a runtime function to load the interceptor property.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
// Save the name_ register across the call.
__ push(name_);
@@ -828,8 +811,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Restore the name_ register.
__ pop(name_);
-
- // Leave the internal frame.
+ __ LeaveInternalFrame();
}
void LoadWithInterceptor(MacroAssembler* masm,
@@ -838,19 +820,18 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JSObject* holder_obj,
Register scratch,
Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(holder, name_);
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- }
+ __ EnterInternalFrame();
+ __ Push(holder, name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ __ LeaveInternalFrame();
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
@@ -1247,10 +1228,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
ApiFunction fun(getter_address);
const int kApiStackSpace = 1;
-
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
-
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object **args_) as the data.
__ str(scratch2, MemOperand(sp, 1 * kPointerSize));
@@ -1310,44 +1288,42 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ Push(receiver, holder_reg, name_reg);
- } else {
- __ Push(holder_reg, name_reg);
- }
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder have been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1);
- __ b(eq, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ Ret();
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ Push(receiver, holder_reg, name_reg);
+ } else {
+ __ Push(holder_reg, name_reg);
+ }
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder have been compiled before (see a caller
+ // of this method.)
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1);
+ __ b(eq, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ Ret();
- // Leave the internal frame.
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
}
+ __ LeaveInternalFrame();
+
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
if (interceptor_holder != lookup->holder()) {
@@ -1580,7 +1556,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements;
+ Label exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1595,15 +1571,11 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ cmp(r0, r4);
__ b(gt, &attempt_to_grow_elements);
- // Check if value is a smi.
- Label with_write_barrier;
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ JumpIfNotSmi(r4, &with_write_barrier);
-
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
+ __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
@@ -1613,31 +1585,14 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi.
+ __ JumpIfNotSmi(r4, &with_write_barrier);
+ __ bind(&exit);
__ Drop(argc + 1);
__ Ret();
__ bind(&with_write_barrier);
-
- __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastSmiOnlyElements(r6, r6, &call_builtin);
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Push the element.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- __ RecordWrite(elements,
- end_elements,
- r4,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ InNewSpace(elements, r4, eq, &exit);
+ __ RecordWriteHelper(elements, end_elements, r4);
__ Drop(argc + 1);
__ Ret();
@@ -1649,15 +1604,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ b(&call_builtin);
}
- __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(r2, &no_fast_elements_check);
- __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r7, r7, &call_builtin);
- __ bind(&no_fast_elements_check);
-
Isolate* isolate = masm()->isolate();
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
@@ -1684,7 +1630,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Update new_space_allocation_top.
__ str(r6, MemOperand(r7));
// Push the argument.
- __ str(r2, MemOperand(end_elements));
+ __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ str(r6, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
@@ -2766,15 +2713,6 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- __ mov(r1, r0);
- __ RecordWriteField(r4,
- JSGlobalPropertyCell::kValueOffset,
- r1,
- r2,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET);
-
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
__ Ret();
@@ -3178,7 +3116,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic(
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
MapList* receiver_maps,
CodeList* handler_ics) {
// ----------- S t a t e -------------
@@ -3274,10 +3212,9 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
}
-MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
MapList* receiver_maps,
- CodeList* handler_stubs,
- MapList* transitioned_maps) {
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -3290,20 +3227,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
int receiver_count = receiver_maps->length();
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- Handle<Map> map(receiver_maps->at(i));
- Handle<Code> code(handler_stubs->at(i));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ Handle<Code> code(handler_ics->at(current));
__ mov(ip, Operand(map));
__ cmp(r3, ip);
- if (transitioned_maps->at(i) == NULL) {
- __ Jump(code, RelocInfo::CODE_TARGET, eq);
- } else {
- Label next_map;
- __ b(eq, &next_map);
- __ mov(r4, Operand(Handle<Map>(transitioned_maps->at(i))));
- __ Jump(code, RelocInfo::CODE_TARGET, al);
- __ bind(&next_map);
- }
+ __ Jump(code, RelocInfo::CODE_TARGET, eq);
}
__ bind(&miss);
@@ -3525,7 +3454,6 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3612,7 +3540,6 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3953,7 +3880,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4017,7 +3943,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4157,7 +4082,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4310,10 +4234,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4322,7 +4244,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic;
Register value_reg = r0;
Register key_reg = r1;
@@ -4355,33 +4277,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ cmp(key_reg, scratch);
__ b(hs, &miss_force_generic);
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value_reg, MemOperand(scratch));
- } else {
- ASSERT(elements_kind == FAST_ELEMENTS);
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- }
+ __ add(scratch,
+ elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ str(value_reg,
+ MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ RecordWrite(scratch,
+ Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
+ receiver_reg, elements_reg);
+
// value_reg (r0) is preserved.
// Done.
__ Ret();
@@ -4390,10 +4294,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
@@ -4409,15 +4309,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
- Register elements_reg = r3;
- Register scratch1 = r4;
- Register scratch2 = r5;
- Register scratch3 = r6;
+ Register scratch = r3;
+ Register elements_reg = r4;
+ Register mantissa_reg = r5;
+ Register exponent_reg = r6;
Register scratch4 = r7;
// This stub is meant to be tail-jumped to, the receiver must already
@@ -4429,25 +4329,90 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Check that the key is within bounds.
if (is_js_array) {
- __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
- __ ldr(scratch1,
+ __ ldr(scratch,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
- __ cmp(key_reg, scratch1);
+ __ cmp(key_reg, scratch);
__ b(hs, &miss_force_generic);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- receiver_reg,
- elements_reg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- &transition_elements_kind);
+ // Handle smi values specially.
+ __ JumpIfSmi(value_reg, &smi_value);
+
+ // Ensure that the object is a heap number
+ __ CheckMap(value_reg,
+ scratch,
+ masm->isolate()->factory()->heap_number_map(),
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
+ // in the exponent.
+ __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+ __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+ __ cmp(exponent_reg, scratch);
+ __ b(ge, &maybe_nan);
+
+ __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+ __ bind(&have_double_value);
+ __ add(scratch, elements_reg,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ str(exponent_reg, FieldMemOperand(scratch, offset));
+ __ Ret();
+
+ __ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ __ b(gt, &is_nan);
+ __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+ __ cmp(mantissa_reg, Operand(0));
+ __ b(eq, &have_double_value);
+ __ bind(&is_nan);
+ // Load canonical NaN for storing into the double array.
+ uint64_t nan_int64 = BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+ __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ __ jmp(&have_double_value);
+
+ __ bind(&smi_value);
+ __ add(scratch, elements_reg,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, scratch,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ // scratch is now effective address of the double element
+
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(VFP3)) {
+ destination = FloatingPointHelper::kVFPRegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+
+ Register untagged_value = receiver_reg;
+ __ SmiUntag(untagged_value, value_reg);
+ FloatingPointHelper::ConvertIntToDouble(
+ masm,
+ untagged_value,
+ destination,
+ d0,
+ mantissa_reg,
+ exponent_reg,
+ scratch4,
+ s2);
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vstr(d0, scratch, 0);
+ } else {
+ __ str(mantissa_reg, MemOperand(scratch, 0));
+ __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+ }
__ Ret();
// Handle store cache miss, replacing the ic with the generic stub.
@@ -4455,10 +4420,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index e1d7c2064e..4dd23c8bb4 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -201,14 +201,17 @@ function ConvertToString(x) {
function ConvertToLocaleString(e) {
- if (IS_NULL_OR_UNDEFINED(e)) {
+ if (e == null) {
return '';
} else {
- // According to ES5, section 15.4.4.3, the toLocaleString conversion
- // must throw a TypeError if ToObject(e).toLocaleString isn't
- // callable.
+ // e_obj's toLocaleString might be overwritten, check if it is a function.
+ // Call ToString if toLocaleString is not a function.
+ // See issue 877615.
var e_obj = ToObject(e);
- return %ToString(e_obj.toLocaleString());
+ if (IS_SPEC_FUNCTION(e_obj.toLocaleString))
+ return ToString(e_obj.toLocaleString());
+ else
+ return ToString(e);
}
}
@@ -378,31 +381,18 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
function ArrayToString() {
- var array;
- var func;
- if (IS_ARRAY(this)) {
- func = this.join;
- if (func === ArrayJoin) {
- return Join(this, this.length, ',', ConvertToString);
- }
- array = this;
- } else {
- array = ToObject(this);
- func = array.join;
+ if (!IS_ARRAY(this)) {
+ throw new $TypeError('Array.prototype.toString is not generic');
}
- if (!IS_SPEC_FUNCTION(func)) {
- return %_CallFunction(array, ObjectToString);
- }
- return %_CallFunction(array, func);
+ return Join(this, this.length, ',', ConvertToString);
}
function ArrayToLocaleString() {
- var array = ToObject(this);
- var arrayLen = array.length;
- var len = TO_UINT32(arrayLen);
- if (len === 0) return "";
- return Join(array, len, ',', ConvertToLocaleString);
+ if (!IS_ARRAY(this)) {
+ throw new $TypeError('Array.prototype.toString is not generic');
+ }
+ return Join(this, this.length, ',', ConvertToLocaleString);
}
@@ -1003,24 +993,21 @@ function ArrayFilter(f, receiver) {
["Array.prototype.filter"]);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = ToUint32(array.length);
-
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
-
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = ToUint32(this.length);
var result = [];
var result_length = 0;
for (var i = 0; i < length; i++) {
- var current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
- if (%_CallFunction(receiver, current, i, array, f)) {
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (%_CallFunction(receiver, current, i, this, f)) {
result[result_length++] = current;
}
}
@@ -1035,22 +1022,19 @@ function ArrayForEach(f, receiver) {
["Array.prototype.forEach"]);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = TO_UINT32(array.length);
-
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
-
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
- var current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
- %_CallFunction(receiver, current, i, array, f);
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ %_CallFunction(receiver, current, i, this, f);
}
}
}
@@ -1064,22 +1048,19 @@ function ArraySome(f, receiver) {
["Array.prototype.some"]);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = TO_UINT32(array.length);
-
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
-
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
- var current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
- if (%_CallFunction(receiver, current, i, array, f)) return true;
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (%_CallFunction(receiver, current, i, this, f)) return true;
}
}
return false;
@@ -1092,22 +1073,19 @@ function ArrayEvery(f, receiver) {
["Array.prototype.every"]);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = TO_UINT32(array.length);
-
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
-
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
- var current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
- if (!%_CallFunction(receiver, current, i, array, f)) return false;
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ if (!%_CallFunction(receiver, current, i, this, f)) return false;
}
}
return true;
@@ -1119,24 +1097,21 @@ function ArrayMap(f, receiver) {
["Array.prototype.map"]);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = TO_UINT32(array.length);
-
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
-
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = TO_UINT32(this.length);
var result = new $Array();
var accumulator = new InternalArray(length);
for (var i = 0; i < length; i++) {
- var current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
- accumulator[i] = %_CallFunction(receiver, current, i, array, f);
+ var current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
+ accumulator[i] = %_CallFunction(receiver, current, i, this, f);
}
}
%MoveArrayContents(accumulator, result);
@@ -1270,20 +1245,19 @@ function ArrayReduce(callback, current) {
["Array.prototype.reduce"]);
}
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = ToObject(this);
- var length = ToUint32(array.length);
-
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping.
+ var length = ToUint32(this.length);
var i = 0;
+
find_initial: if (%_ArgumentsLength() < 2) {
for (; i < length; i++) {
- current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
+ current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
i++;
break find_initial;
}
@@ -1293,9 +1267,9 @@ function ArrayReduce(callback, current) {
var receiver = %GetDefaultReceiver(callback);
for (; i < length; i++) {
- var element = array[i];
- if (!IS_UNDEFINED(element) || i in array) {
- current = %_CallFunction(receiver, current, element, i, array, callback);
+ var element = this[i];
+ if (!IS_UNDEFINED(element) || i in this) {
+ current = %_CallFunction(receiver, current, element, i, this, callback);
}
}
return current;
@@ -1307,20 +1281,15 @@ function ArrayReduceRight(callback, current) {
["Array.prototype.reduceRight"]);
}
- // Pull out the length so that side effects are visible before the
- // callback function is checked.
- var array = ToObject(this);
- var length = ToUint32(array.length);
-
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
+ var i = ToUint32(this.length) - 1;
- var i = length - 1;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i >= 0; i--) {
- current = array[i];
- if (!IS_UNDEFINED(current) || i in array) {
+ current = this[i];
+ if (!IS_UNDEFINED(current) || i in this) {
i--;
break find_initial;
}
@@ -1330,9 +1299,9 @@ function ArrayReduceRight(callback, current) {
var receiver = %GetDefaultReceiver(callback);
for (; i >= 0; i--) {
- var element = array[i];
- if (!IS_UNDEFINED(element) || i in array) {
- current = %_CallFunction(receiver, current, element, i, array, callback);
+ var element = this[i];
+ if (!IS_UNDEFINED(element) || i in this) {
+ current = %_CallFunction(receiver, current, element, i, this, callback);
}
}
return current;
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index bda85e69de..ad5f350816 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -38,7 +38,6 @@
#include "deoptimizer.h"
#include "execution.h"
#include "ic-inl.h"
-#include "incremental-marking.h"
#include "factory.h"
#include "runtime.h"
#include "runtime-profiler.h"
@@ -48,7 +47,6 @@
#include "ast.h"
#include "regexp-macro-assembler.h"
#include "platform.h"
-#include "store-buffer.h"
// Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
@@ -518,7 +516,6 @@ void RelocIterator::next() {
RelocIterator::RelocIterator(Code* code, int mode_mask) {
- rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
// Relocation info is read backwards.
@@ -739,38 +736,9 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {}
-ExternalReference ExternalReference::
- incremental_marking_record_write_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
-}
-
-
-ExternalReference ExternalReference::
- incremental_evacuation_record_write_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
-}
-
-
-ExternalReference ExternalReference::
- store_buffer_overflow_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
-}
-
-
-ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
-}
-
-
ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
- return
- ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
+ return ExternalReference(Redirect(isolate,
+ FUNCTION_ADDR(Runtime::PerformGC)));
}
@@ -834,6 +802,17 @@ ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
}
+ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
+ return ExternalReference(isolate->factory()->the_hole_value().location());
+}
+
+
+ExternalReference ExternalReference::arguments_marker_location(
+ Isolate* isolate) {
+ return ExternalReference(isolate->factory()->arguments_marker().location());
+}
+
+
ExternalReference ExternalReference::roots_address(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_address());
}
@@ -861,14 +840,9 @@ ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
}
-ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
- return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
-}
-
-
ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
- return ExternalReference(reinterpret_cast<Address>(
- isolate->heap()->NewSpaceMask()));
+ Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
+ return ExternalReference(mask);
}
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index e5661c9f12..d58034df0d 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -143,9 +143,6 @@ class Label BASE_EMBEDDED {
};
-enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
-
-
// -----------------------------------------------------------------------------
// Relocation information
@@ -219,9 +216,8 @@ class RelocInfo BASE_EMBEDDED {
RelocInfo() {}
-
- RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
- : pc_(pc), rmode_(rmode), data_(data), host_(host) {
+ RelocInfo(byte* pc, Mode rmode, intptr_t data)
+ : pc_(pc), rmode_(rmode), data_(data) {
}
static inline bool IsConstructCall(Mode mode) {
@@ -230,9 +226,6 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsCodeTarget(Mode mode) {
return mode <= LAST_CODE_ENUM;
}
- static inline bool IsEmbeddedObject(Mode mode) {
- return mode == EMBEDDED_OBJECT;
- }
// Is the relocation mode affected by GC?
static inline bool IsGCRelocMode(Mode mode) {
return mode <= LAST_GCED_ENUM;
@@ -265,7 +258,6 @@ class RelocInfo BASE_EMBEDDED {
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
- Code* host() const { return host_; }
// Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta));
@@ -361,7 +353,6 @@ class RelocInfo BASE_EMBEDDED {
byte* pc_;
Mode rmode_;
intptr_t data_;
- Code* host_;
#ifdef V8_TARGET_ARCH_MIPS
// Code and Embedded Object pointers in mips are stored split
// across two consecutive 32-bit instructions. Heap management
@@ -570,13 +561,6 @@ class ExternalReference BASE_EMBEDDED {
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
- static ExternalReference incremental_marking_record_write_function(
- Isolate* isolate);
- static ExternalReference incremental_evacuation_record_write_function(
- Isolate* isolate);
- static ExternalReference store_buffer_overflow_function(
- Isolate* isolate);
- static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate);
static ExternalReference fill_heap_number_with_random_function(
Isolate* isolate);
@@ -593,6 +577,12 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
+ // Static variable Factory::the_hole_value.location()
+ static ExternalReference the_hole_value_location(Isolate* isolate);
+
+ // Static variable Factory::arguments_marker.location()
+ static ExternalReference arguments_marker_location(Isolate* isolate);
+
// Static variable Heap::roots_address()
static ExternalReference roots_address(Isolate* isolate);
@@ -616,10 +606,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_space_start(Isolate* isolate);
static ExternalReference new_space_mask(Isolate* isolate);
static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
- static ExternalReference new_space_mark_bits(Isolate* isolate);
-
- // Write barrier.
- static ExternalReference store_buffer_top(Isolate* isolate);
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address(Isolate* isolate);
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index d493814544..418cc432b6 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -327,77 +327,56 @@ bool BinaryOperation::ResultOverwriteAllowed() {
}
-static bool IsTypeof(Expression* expr) {
- UnaryOperation* maybe_unary = expr->AsUnaryOperation();
- return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
-}
-
-
-// Check for the pattern: typeof <expression> equals <string literal>.
-static bool MatchLiteralCompareTypeof(Expression* left,
- Token::Value op,
- Expression* right,
- Expression** expr,
- Handle<String>* check) {
- if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
- *expr = left->AsUnaryOperation()->expression();
- *check = Handle<String>::cast(right->AsLiteral()->handle());
- return true;
- }
- return false;
-}
-
-
bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
Handle<String>* check) {
- return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) ||
- MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
-}
-
-
-static bool IsVoidOfLiteral(Expression* expr) {
- UnaryOperation* maybe_unary = expr->AsUnaryOperation();
- return maybe_unary != NULL &&
- maybe_unary->op() == Token::VOID &&
- maybe_unary->expression()->AsLiteral() != NULL;
-}
-
+ if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
+
+ UnaryOperation* left_unary = left_->AsUnaryOperation();
+ UnaryOperation* right_unary = right_->AsUnaryOperation();
+ Literal* left_literal = left_->AsLiteral();
+ Literal* right_literal = right_->AsLiteral();
+
+ // Check for the pattern: typeof <expression> == <string literal>.
+ if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
+ right_literal != NULL && right_literal->handle()->IsString()) {
+ *expr = left_unary->expression();
+ *check = Handle<String>::cast(right_literal->handle());
+ return true;
+ }
-// Check for the pattern: void <literal> equals <expression>
-static bool MatchLiteralCompareUndefined(Expression* left,
- Token::Value op,
- Expression* right,
- Expression** expr) {
- if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
- *expr = right;
+ // Check for the pattern: <string literal> == typeof <expression>.
+ if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
+ left_literal != NULL && left_literal->handle()->IsString()) {
+ *expr = right_unary->expression();
+ *check = Handle<String>::cast(left_literal->handle());
return true;
}
+
return false;
}
bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
- return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
- MatchLiteralCompareUndefined(right_, op_, left_, expr);
-}
+ if (op_ != Token::EQ_STRICT) return false;
+ UnaryOperation* left_unary = left_->AsUnaryOperation();
+ UnaryOperation* right_unary = right_->AsUnaryOperation();
-// Check for the pattern: null equals <expression>
-static bool MatchLiteralCompareNull(Expression* left,
- Token::Value op,
- Expression* right,
- Expression** expr) {
- if (left->IsNullLiteral() && Token::IsEqualityOp(op)) {
- *expr = right;
+ // Check for the pattern: <expression> === void <literal>.
+ if (right_unary != NULL && right_unary->op() == Token::VOID &&
+ right_unary->expression()->AsLiteral() != NULL) {
+ *expr = left_;
return true;
}
- return false;
-}
+ // Check for the pattern: void <literal> === <expression>.
+ if (left_unary != NULL && left_unary->op() == Token::VOID &&
+ left_unary->expression()->AsLiteral() != NULL) {
+ *expr = right_;
+ return true;
+ }
-bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
- return MatchLiteralCompareNull(left_, op_, right_, expr) ||
- MatchLiteralCompareNull(right_, op_, left_, expr);
+ return false;
}
@@ -550,9 +529,7 @@ bool Conditional::IsInlineable() const {
bool VariableProxy::IsInlineable() const {
- return var()->IsUnallocated()
- || var()->IsStackAllocated()
- || var()->IsContextSlot();
+ return var()->IsUnallocated() || var()->IsStackAllocated();
}
@@ -621,6 +598,11 @@ bool CompareOperation::IsInlineable() const {
}
+bool CompareToNull::IsInlineable() const {
+ return expression()->IsInlineable();
+}
+
+
bool CountOperation::IsInlineable() const {
return expression()->IsInlineable();
}
@@ -764,41 +746,37 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
CallKind call_kind) {
- is_monomorphic_ = oracle->CallIsMonomorphic(this);
Property* property = expression()->AsProperty();
- if (property == NULL) {
- // Function call. Specialize for monomorphic calls.
- if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
- } else {
- // Method call. Specialize for the receiver types seen at runtime.
- Literal* key = property->key()->AsLiteral();
- ASSERT(key != NULL && key->handle()->IsString());
- Handle<String> name = Handle<String>::cast(key->handle());
- receiver_types_.Clear();
- oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+ ASSERT(property != NULL);
+ // Specialize for the receiver types seen at runtime.
+ Literal* key = property->key()->AsLiteral();
+ ASSERT(key != NULL && key->handle()->IsString());
+ Handle<String> name = Handle<String>::cast(key->handle());
+ receiver_types_.Clear();
+ oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- int length = receiver_types_.length();
- for (int i = 0; i < length; i++) {
- Handle<Map> map = receiver_types_.at(i);
- ASSERT(!map.is_null() && *map != NULL);
- }
+ if (FLAG_enable_slow_asserts) {
+ int length = receiver_types_.length();
+ for (int i = 0; i < length; i++) {
+ Handle<Map> map = receiver_types_.at(i);
+ ASSERT(!map.is_null() && *map != NULL);
}
+ }
#endif
- check_type_ = oracle->GetCallCheckType(this);
- if (is_monomorphic_) {
- Handle<Map> map;
- if (receiver_types_.length() > 0) {
- ASSERT(check_type_ == RECEIVER_MAP_CHECK);
- map = receiver_types_.at(0);
- } else {
- ASSERT(check_type_ != RECEIVER_MAP_CHECK);
- holder_ = Handle<JSObject>(
- oracle->GetPrototypeForPrimitiveCheck(check_type_));
- map = Handle<Map>(holder_->map());
- }
- is_monomorphic_ = ComputeTarget(map, name);
+ is_monomorphic_ = oracle->CallIsMonomorphic(this);
+ check_type_ = oracle->GetCallCheckType(this);
+ if (is_monomorphic_) {
+ Handle<Map> map;
+ if (receiver_types_.length() > 0) {
+ ASSERT(check_type_ == RECEIVER_MAP_CHECK);
+ map = receiver_types_.at(0);
+ } else {
+ ASSERT(check_type_ != RECEIVER_MAP_CHECK);
+ holder_ = Handle<JSObject>(
+ oracle->GetPrototypeForPrimitiveCheck(check_type_));
+ map = Handle<Map>(holder_->map());
}
+ is_monomorphic_ = ComputeTarget(map, name);
}
}
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 0efc4835c4..b56205f9a6 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -90,6 +90,7 @@ namespace internal {
V(CountOperation) \
V(BinaryOperation) \
V(CompareOperation) \
+ V(CompareToNull) \
V(ThisFunction)
#define AST_NODE_LIST(V) \
@@ -288,12 +289,6 @@ class Expression: public AstNode {
// True iff the expression is a literal represented as a smi.
virtual bool IsSmiLiteral() { return false; }
- // True iff the expression is a string literal.
- virtual bool IsStringLiteral() { return false; }
-
- // True iff the expression is the null literal.
- virtual bool IsNullLiteral() { return false; }
-
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
UNREACHABLE();
@@ -398,29 +393,31 @@ class Block: public BreakableStatement {
class Declaration: public AstNode {
public:
Declaration(VariableProxy* proxy,
- VariableMode mode,
+ Variable::Mode mode,
FunctionLiteral* fun,
Scope* scope)
: proxy_(proxy),
mode_(mode),
fun_(fun),
scope_(scope) {
- ASSERT(mode == VAR || mode == CONST || mode == LET);
+ ASSERT(mode == Variable::VAR ||
+ mode == Variable::CONST ||
+ mode == Variable::LET);
// At the moment there are no "const functions" in JavaScript...
- ASSERT(fun == NULL || mode == VAR || mode == LET);
+ ASSERT(fun == NULL || mode == Variable::VAR || mode == Variable::LET);
}
DECLARE_NODE_TYPE(Declaration)
VariableProxy* proxy() const { return proxy_; }
- VariableMode mode() const { return mode_; }
+ Variable::Mode mode() const { return mode_; }
FunctionLiteral* fun() const { return fun_; } // may be NULL
virtual bool IsInlineable() const;
Scope* scope() const { return scope_; }
private:
VariableProxy* proxy_;
- VariableMode mode_;
+ Variable::Mode mode_;
FunctionLiteral* fun_;
// Nested scope from which the declaration originated.
@@ -894,8 +891,6 @@ class Literal: public Expression {
virtual bool IsTrivial() { return true; }
virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
- virtual bool IsStringLiteral() { return handle_->IsString(); }
- virtual bool IsNullLiteral() { return handle_->IsNull(); }
// Check if this literal is identical to the other literal.
bool IsIdenticalTo(const Literal* other) const {
@@ -1470,7 +1465,6 @@ class CompareOperation: public Expression {
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
bool IsLiteralCompareUndefined(Expression** expr);
- bool IsLiteralCompareNull(Expression** expr);
private:
Token::Value op_;
@@ -1483,6 +1477,25 @@ class CompareOperation: public Expression {
};
+class CompareToNull: public Expression {
+ public:
+ CompareToNull(Isolate* isolate, bool is_strict, Expression* expression)
+ : Expression(isolate), is_strict_(is_strict), expression_(expression) { }
+
+ DECLARE_NODE_TYPE(CompareToNull)
+
+ virtual bool IsInlineable() const;
+
+ bool is_strict() const { return is_strict_; }
+ Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
+ Expression* expression() const { return expression_; }
+
+ private:
+ bool is_strict_;
+ Expression* expression_;
+};
+
+
class Conditional: public Expression {
public:
Conditional(Isolate* isolate,
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index dc722cb749..f07e625ec0 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -34,7 +34,6 @@
#include "debug.h"
#include "execution.h"
#include "global-handles.h"
-#include "isolate-inl.h"
#include "macro-assembler.h"
#include "natives.h"
#include "objects-visiting.h"
@@ -996,26 +995,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
initial_map->instance_size() + 5 * kPointerSize);
initial_map->set_instance_descriptors(*descriptors);
initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
-
- // RegExp prototype object is itself a RegExp.
- Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map);
- proto_map->set_prototype(global_context()->initial_object_prototype());
- Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
- proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
- heap->empty_string());
- proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
- Smi::FromInt(0),
- SKIP_WRITE_BARRIER); // It's a Smi.
- initial_map->set_prototype(*proto);
- factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
- JSRegExp::IRREGEXP, factory->empty_string(),
- JSRegExp::Flags(0), 0);
}
{ // -- J S O N
@@ -1097,11 +1076,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
elements->set(0, *array);
array = factory->NewFixedArray(0);
elements->set(1, *array);
- Handle<Map> non_strict_arguments_elements_map =
- factory->GetElementsTransitionMap(result,
- NON_STRICT_ARGUMENTS_ELEMENTS);
- result->set_map(*non_strict_arguments_elements_map);
- ASSERT(result->HasNonStrictArgumentsElements());
result->set_elements(*elements);
global_context()->set_aliased_arguments_boilerplate(*result);
}
@@ -1353,8 +1327,6 @@ void Genesis::InstallNativeFunctions() {
configure_instance_fun);
INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
INSTALL_NATIVE(JSObject, "functionCache", function_cache);
- INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
- to_complete_property_descriptor);
}
void Genesis::InstallExperimentalNativeFunctions() {
@@ -1583,18 +1555,6 @@ bool Genesis::InstallNatives() {
isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
array_function->shared()->DontAdaptArguments();
- // InternalArrays should not use Smi-Only array optimizations. There are too
- // many places in the C++ runtime code (e.g. RegEx) that assume that
- // elements in InternalArrays can be set to non-Smi values without going
- // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
- // transition easy to trap. Moreover, they rarely are smi-only.
- MaybeObject* maybe_map =
- array_function->initial_map()->CopyDropTransitions();
- Map* new_map;
- if (!maybe_map->To<Map>(&new_map)) return maybe_map;
- new_map->set_elements_kind(FAST_ELEMENTS);
- array_function->set_initial_map(new_map);
-
// Make "length" magic on instances.
Handle<DescriptorArray> array_descriptors =
factory()->CopyAppendForeignDescriptor(
@@ -1978,15 +1938,14 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
if (!InstallExtension(extension->dependencies()[i])) return false;
}
Isolate* isolate = Isolate::Current();
- Handle<String> source_code =
- isolate->factory()->NewExternalStringFromAscii(extension->source());
- bool result = CompileScriptCached(
- CStrVector(extension->name()),
- source_code,
- isolate->bootstrapper()->extensions_cache(),
- extension,
- Handle<Context>(isolate->context()),
- false);
+ Vector<const char> source = CStrVector(extension->source());
+ Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
+ bool result = CompileScriptCached(CStrVector(extension->name()),
+ source_code,
+ isolate->bootstrapper()->extensions_cache(),
+ extension,
+ Handle<Context>(isolate->context()),
+ false);
ASSERT(isolate->has_pending_exception() != result);
if (!result) {
isolate->clear_pending_exception();
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index d513200f0b..e6a0699f07 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -33,7 +33,6 @@
#include "builtins.h"
#include "gdb-jit.h"
#include "ic-inl.h"
-#include "mark-compact.h"
#include "vm-state-inl.h"
namespace v8 {
@@ -203,7 +202,7 @@ BUILTIN(ArrayCodeGeneric) {
}
// 'array' now contains the JSArray we should initialize.
- ASSERT(array->HasFastTypeElements());
+ ASSERT(array->HasFastElements());
// Optimize the case where there is one argument and the argument is a
// small smi.
@@ -216,8 +215,7 @@ BUILTIN(ArrayCodeGeneric) {
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj));
- if (maybe_obj->IsFailure()) return maybe_obj;
+ array->SetContent(FixedArray::cast(obj));
return array;
}
}
@@ -241,11 +239,6 @@ BUILTIN(ArrayCodeGeneric) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- // Set length and elements on the array.
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(FixedArray::cast(obj));
- if (maybe_object->IsFailure()) return maybe_object;
-
AssertNoAllocation no_gc;
FixedArray* elms = FixedArray::cast(obj);
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@@ -254,6 +247,7 @@ BUILTIN(ArrayCodeGeneric) {
elms->set(index, args[index+1], mode);
}
+ // Set length and elements on the array.
array->set_elements(FixedArray::cast(obj));
array->set_length(len);
@@ -301,7 +295,6 @@ static void CopyElements(Heap* heap,
if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
- heap->incremental_marking()->RecordWrites(dst);
}
@@ -320,7 +313,6 @@ static void MoveElements(Heap* heap,
if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
- heap->incremental_marking()->RecordWrites(dst);
}
@@ -366,14 +358,6 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
former_start[to_trim] = heap->fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
- // Maintain marking consistency for HeapObjectIterator and
- // IncrementalMarking.
- int size_delta = to_trim * kPointerSize;
- if (heap->marking()->TransferMark(elms->address(),
- elms->address() + size_delta)) {
- MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta);
- }
-
return FixedArray::cast(HeapObject::FromAddress(
elms->address() + to_trim * kPointerSize));
}
@@ -400,42 +384,20 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
MUST_USE_RESULT
static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
- Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
+ Heap* heap, Object* receiver) {
if (!receiver->IsJSArray()) return NULL;
JSArray* array = JSArray::cast(receiver);
HeapObject* elms = array->elements();
- Map* map = elms->map();
- if (map == heap->fixed_array_map()) {
- if (args == NULL || !array->HasFastSmiOnlyElements()) {
- return elms;
- }
- } else if (map == heap->fixed_cow_array_map()) {
- MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
- if (args == NULL || !array->HasFastSmiOnlyElements() ||
- maybe_writable_result->IsFailure()) {
- return maybe_writable_result;
- }
- } else {
- return NULL;
+ if (elms->map() == heap->fixed_array_map()) return elms;
+ if (elms->map() == heap->fixed_cow_array_map()) {
+ return array->EnsureWritableFastElements();
}
-
- // Need to ensure that the arguments passed in args can be contained in
- // the array.
- int args_length = args->length();
- if (first_added_arg >= args_length) return array->elements();
-
- MaybeObject* maybe_array = array->EnsureCanContainElements(
- args,
- first_added_arg,
- args_length - first_added_arg);
- if (maybe_array->IsFailure()) return maybe_array;
- return array->elements();
+ return NULL;
}
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
- if (!FLAG_clever_optimizations) return false;
Context* global_context = heap->isolate()->context()->global_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
@@ -451,18 +413,20 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
HandleScope handleScope(isolate);
Handle<Object> js_builtin =
- GetProperty(Handle<JSObject>(isolate->global_context()->builtins()),
- name);
- Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
- int argc = args.length() - 1;
- ScopedVector<Handle<Object> > argv(argc);
- for (int i = 0; i < argc; ++i) {
- argv[i] = args.at<Object>(i + 1);
- }
- bool pending_exception;
+ GetProperty(Handle<JSObject>(
+ isolate->global_context()->builtins()),
+ name);
+ ASSERT(js_builtin->IsJSFunction());
+ Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
+ ScopedVector<Object**> argv(args.length() - 1);
+ int n_args = args.length() - 1;
+ for (int i = 0; i < n_args; i++) {
+ argv[i] = args.at<Object>(i + 1).location();
+ }
+ bool pending_exception = false;
Handle<Object> result = Execution::Call(function,
args.receiver(),
- argc,
+ n_args,
argv.start(),
&pending_exception);
if (pending_exception) return Failure::Exception();
@@ -475,7 +439,7 @@ BUILTIN(ArrayPush) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL) {
return CallJsBuiltin(isolate, "ArrayPush", args);
}
@@ -511,6 +475,7 @@ BUILTIN(ArrayPush) {
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
+ array->set_elements(elms);
}
// Add the provided values.
@@ -520,10 +485,6 @@ BUILTIN(ArrayPush) {
elms->set(index + len, args[index + 1], mode);
}
- if (elms != array->elements()) {
- array->set_elements(elms);
- }
-
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
@@ -535,7 +496,7 @@ BUILTIN(ArrayPop) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
@@ -568,7 +529,7 @@ BUILTIN(ArrayShift) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayShift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -578,7 +539,7 @@ BUILTIN(ArrayShift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastTypeElements());
+ ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
@@ -590,7 +551,9 @@ BUILTIN(ArrayShift) {
}
if (!heap->lo_space()->Contains(elms)) {
- array->set_elements(LeftTrimFixedArray(heap, elms, 1));
+ // As elms are still in the same space they used to be,
+ // there is no need to update the region dirty mark.
+ array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
} else {
// Shift the elements.
AssertNoAllocation no_gc;
@@ -610,7 +573,7 @@ BUILTIN(ArrayUnshift) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayUnshift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -620,7 +583,7 @@ BUILTIN(ArrayUnshift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastTypeElements());
+ ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -629,10 +592,6 @@ BUILTIN(ArrayUnshift) {
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(&args, 1, to_add);
- if (maybe_object->IsFailure()) return maybe_object;
-
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
@@ -641,11 +600,13 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
+
AssertNoAllocation no_gc;
if (len > 0) {
CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
}
FillWithHoles(heap, new_elms, new_length, capacity);
+
elms = new_elms;
array->set_elements(elms);
} else {
@@ -673,7 +634,7 @@ BUILTIN(ArraySlice) {
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
- if (!array->HasFastTypeElements() ||
+ if (!array->HasFastElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -689,7 +650,7 @@ BUILTIN(ArraySlice) {
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
- && JSObject::cast(receiver)->HasFastTypeElements();
+ && JSObject::cast(receiver)->HasFastElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -760,10 +721,6 @@ BUILTIN(ArraySlice) {
}
FixedArray* result_elms = FixedArray::cast(result);
- MaybeObject* maybe_object =
- result_array->EnsureCanContainElements(result_elms);
- if (maybe_object->IsFailure()) return maybe_object;
-
AssertNoAllocation no_gc;
CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
@@ -781,7 +738,7 @@ BUILTIN(ArraySplice) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
+ EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArraySplice", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -791,7 +748,7 @@ BUILTIN(ArraySplice) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastTypeElements());
+ ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
@@ -868,9 +825,9 @@ BUILTIN(ArraySplice) {
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
+
int new_length = len - actual_delete_count + item_count;
- bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
const bool trim_array = !heap->lo_space()->Contains(elms) &&
@@ -885,8 +842,7 @@ BUILTIN(ArraySplice) {
}
elms = LeftTrimFixedArray(heap, elms, delta);
-
- elms_changed = true;
+ array->set_elements(elms, SKIP_WRITE_BARRIER);
} else {
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc,
@@ -926,7 +882,7 @@ BUILTIN(ArraySplice) {
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
- elms_changed = true;
+ array->set_elements(elms);
} else {
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc,
@@ -942,10 +898,6 @@ BUILTIN(ArraySplice) {
elms->set(k, args[3 + k - actual_start], mode);
}
- if (elms_changed) {
- array->set_elements(elms);
- }
-
// Set the length.
array->set_length(Smi::FromInt(new_length));
@@ -968,7 +920,7 @@ BUILTIN(ArrayConcat) {
int result_len = 0;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
- if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
+ if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
|| JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@@ -1004,17 +956,6 @@ BUILTIN(ArrayConcat) {
}
FixedArray* result_elms = FixedArray::cast(result);
- // Ensure element type transitions happen before copying elements in.
- if (result_array->HasFastSmiOnlyElements()) {
- for (int i = 0; i < n_arguments; i++) {
- JSArray* array = JSArray::cast(args[i]);
- if (!array->HasFastSmiOnlyElements()) {
- result_array->EnsureCanContainNonSmiElements();
- break;
- }
- }
- }
-
// Copy data.
AssertNoAllocation no_gc;
int start_pos = 0;
@@ -1666,22 +1607,20 @@ void Builtins::Setup(bool create_heap_objects) {
const BuiltinDesc* functions = BuiltinFunctionTable::functions();
// For now we generate builtin adaptor code into a stack-allocated
- // buffer, before copying it into individual code objects. Be careful
- // with alignment, some platforms don't like unaligned code.
- union { int force_alignment; byte buffer[4*KB]; } u;
+ // buffer, before copying it into individual code objects.
+ byte buffer[4*KB];
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
for (int i = 0; i < builtin_count; i++) {
if (create_heap_objects) {
- MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
+ MacroAssembler masm(isolate, buffer, sizeof buffer);
// Generate the code/adaptor.
typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
// We pass all arguments to the generator, but it may not use all of
// them. This works because the first arguments are on top of the
// stack.
- ASSERT(!masm.has_frame());
g(&masm, functions[i].name, functions[i].extra_args);
// Move the code into the object heap.
CodeDesc desc;
diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc
index 9241d26582..30a67a661b 100644
--- a/deps/v8/src/cached-powers.cc
+++ b/deps/v8/src/cached-powers.cc
@@ -134,12 +134,14 @@ static const CachedPower kCachedPowers[] = {
};
static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
-static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
+static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent;
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
-// Difference between the decimal exponents in the table above.
-const int PowersOfTenCache::kDecimalExponentDistance = 8;
-const int PowersOfTenCache::kMinDecimalExponent = -348;
-const int PowersOfTenCache::kMaxDecimalExponent = 340;
+const int PowersOfTenCache::kDecimalExponentDistance =
+ kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent;
+const int PowersOfTenCache::kMinDecimalExponent =
+ kCachedPowers[0].decimal_exponent;
+const int PowersOfTenCache::kMaxDecimalExponent =
+ kCachedPowers[kCachedPowersLength - 1].decimal_exponent;
void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
int min_exponent,
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 4bc2603c53..00da4cba62 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -52,12 +52,11 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
masm->isolate()->counters()->code_stubs()->Increment();
- // Nested stubs are not allowed for leaves.
- AllowStubCallsScope allow_scope(masm, false);
+ // Nested stubs are not allowed for leaves.
+ AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
// Generate the code for the stub.
masm->set_generating_stub(true);
- NoCurrentFrameScope scope(masm);
Generate(masm);
}
@@ -128,10 +127,8 @@ Handle<Code> CodeStub::GetCode() {
GetKey(),
new_object);
heap->public_set_code_stubs(*dict);
+
code = *new_object;
- Activate(code);
- } else {
- CHECK(IsPregenerated() == code->is_pregenerated());
}
ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
@@ -169,11 +166,7 @@ MaybeObject* CodeStub::TryGetCode() {
heap->code_stubs()->AtNumberPut(GetKey(), code);
if (maybe_new_object->ToObject(&new_object)) {
heap->public_set_code_stubs(NumberDictionary::cast(new_object));
- } else if (MustBeInStubCache()) {
- return maybe_new_object;
}
-
- Activate(code);
}
return code;
@@ -195,11 +188,6 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
}
-void CodeStub::PrintName(StringStream* stream) {
- stream->Add("%s", MajorName(MajorKey(), false));
-}
-
-
int ICCompareStub::MinorKey() {
return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
}
@@ -257,7 +245,6 @@ void InstanceofStub::PrintName(StringStream* stream) {
void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case FAST_DOUBLE_ELEMENTS:
@@ -287,11 +274,7 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS: {
- KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
- is_js_array_,
- elements_kind_);
- }
+ KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
break;
case FAST_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
@@ -319,20 +302,24 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::PrintName(StringStream* stream) {
- stream->Add("ArgumentsAccessStub_");
+ const char* type_name = NULL; // Make g++ happy.
switch (type_) {
- case READ_ELEMENT: stream->Add("ReadElement"); break;
- case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
- case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
- case NEW_STRICT: stream->Add("NewStrict"); break;
+ case READ_ELEMENT: type_name = "ReadElement"; break;
+ case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
+ case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
+ case NEW_STRICT: type_name = "NewStrict"; break;
}
+ stream->Add("ArgumentsAccessStub_%s", type_name);
}
void CallFunctionStub::PrintName(StringStream* stream) {
- stream->Add("CallFunctionStub_Args%d", argc_);
- if (ReceiverMightBeImplicit()) stream->Add("_Implicit");
- if (RecordCallTarget()) stream->Add("_Recording");
+ const char* flags_name = NULL; // Make g++ happy.
+ switch (flags_) {
+ case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
+ case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
+ }
+ stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name);
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index acfbd469f0..64c89b93d1 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -45,23 +45,27 @@ namespace internal {
V(Compare) \
V(CompareIC) \
V(MathPow) \
- V(RecordWrite) \
- V(StoreBufferOverflow) \
- V(RegExpExec) \
V(TranscendentalCache) \
V(Instanceof) \
+ /* All stubs above this line only exist in a few versions, which are */ \
+ /* generated ahead of time. Therefore compiling a call to one of */ \
+ /* them can't cause a new stub to be compiled, so compiling a call to */ \
+ /* them is GC safe. The ones below this line exist in many variants */ \
+ /* so code compiling a call to one can cause a GC. This means they */ \
+ /* can't be called from other stubs, since stub generation code is */ \
+ /* not GC safe. */ \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(StackCheck) \
V(FastNewClosure) \
V(FastNewContext) \
- V(FastNewBlockContext) \
V(FastCloneShallowArray) \
V(RevertToNumber) \
V(ToBoolean) \
V(ToNumber) \
V(CounterOp) \
V(ArgumentsAccess) \
+ V(RegExpExec) \
V(RegExpConstructResult) \
V(NumberToString) \
V(CEntry) \
@@ -69,7 +73,7 @@ namespace internal {
V(KeyedLoadElement) \
V(KeyedStoreElement) \
V(DebuggerStatement) \
- V(StringDictionaryLookup)
+ V(StringDictionaryNegativeLookup)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@@ -138,27 +142,6 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
- bool CompilingCallsToThisStubIsGCSafe() {
- bool is_pregenerated = IsPregenerated();
- Code* code = NULL;
- CHECK(!is_pregenerated || FindCodeInCache(&code));
- return is_pregenerated;
- }
-
- // See comment above, where Instanceof is defined.
- virtual bool IsPregenerated() { return false; }
-
- static void GenerateStubsAheadOfTime();
- static void GenerateFPStubs();
-
- // Some stubs put untagged junk on the stack that cannot be scanned by the
- // GC. This means that we must be statically sure that no GC can occur while
- // they are running. If that is the case they should override this to return
- // true, which will cause an assertion if we try to call something that can
- // GC or if we try to put a stack frame on top of the junk, which would not
- // result in a traversable stack.
- virtual bool SometimesSetsUpAFrame() { return true; }
-
protected:
static const int kMajorBits = 6;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
@@ -181,14 +164,6 @@ class CodeStub BASE_EMBEDDED {
// Finish the code object after it has been generated.
virtual void FinishCode(Code* code) { }
- // Returns true if TryGetCode should fail if it failed
- // to register newly generated stub in the stub cache.
- virtual bool MustBeInStubCache() { return false; }
-
- // Activate newly generated stub. Is called after
- // registering stub in the stub cache.
- virtual void Activate(Code* code) { }
-
// Returns information for computing the number key.
virtual Major MajorKey() = 0;
virtual int MinorKey() = 0;
@@ -203,7 +178,9 @@ class CodeStub BASE_EMBEDDED {
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
- virtual void PrintName(StringStream* stream);
+ virtual void PrintName(StringStream* stream) {
+ stream->Add("%s", MajorName(MajorKey(), false));
+ }
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
@@ -216,6 +193,9 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
+ // See comment above, where Instanceof is defined.
+ bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
+
class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
@@ -324,7 +304,7 @@ class FastNewContextStub : public CodeStub {
static const int kMaximumSlots = 64;
explicit FastNewContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
+ ASSERT(slots_ > 0 && slots <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
@@ -337,24 +317,6 @@ class FastNewContextStub : public CodeStub {
};
-class FastNewBlockContextStub : public CodeStub {
- public:
- static const int kMaximumSlots = 64;
-
- explicit FastNewBlockContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int slots_;
-
- Major MajorKey() { return FastNewBlockContext; }
- int MinorKey() { return slots_; }
-};
-
-
class FastCloneShallowArrayStub : public CodeStub {
public:
// Maximum length of copied elements array.
@@ -569,18 +531,11 @@ class CompareStub: public CodeStub {
class CEntryStub : public CodeStub {
public:
- explicit CEntryStub(int result_size,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs)
- : result_size_(result_size), save_doubles_(save_doubles) { }
+ explicit CEntryStub(int result_size)
+ : result_size_(result_size), save_doubles_(false) { }
void Generate(MacroAssembler* masm);
-
- // The version of this stub that doesn't save doubles is generated ahead of
- // time, so it's OK to call it from other stubs that can't cope with GC during
- // their code generation. On machines that always have gp registers (x64) we
- // can generate both variants ahead of time.
- virtual bool IsPregenerated();
- static void GenerateAheadOfTime();
+ void SaveDoubles() { save_doubles_ = true; }
private:
void GenerateCore(MacroAssembler* masm,
@@ -595,7 +550,7 @@ class CEntryStub : public CodeStub {
// Number of pointers/values returned.
const int result_size_;
- SaveFPRegsMode save_doubles_;
+ bool save_doubles_;
Major MajorKey() { return CEntry; }
int MinorKey();
@@ -692,32 +647,10 @@ class CallFunctionStub: public CodeStub {
void Generate(MacroAssembler* masm);
- virtual void FinishCode(Code* code);
-
- static void Clear(Heap* heap, Address address);
-
- static Object* GetCachedValue(Address address);
-
static int ExtractArgcFromMinorKey(int minor_key) {
return ArgcBits::decode(minor_key);
}
- // The object that indicates an uninitialized cache.
- static Handle<Object> UninitializedSentinel(Isolate* isolate) {
- return isolate->factory()->the_hole_value();
- }
-
- // A raw version of the uninitialized sentinel that's safe to read during
- // garbage collection (e.g., for patching the cache).
- static Object* RawUninitializedSentinel(Heap* heap) {
- return heap->raw_unchecked_the_hole_value();
- }
-
- // The object that indicates a megamorphic state.
- static Handle<Object> MegamorphicSentinel(Isolate* isolate) {
- return isolate->factory()->undefined_value();
- }
-
private:
int argc_;
CallFunctionFlags flags_;
@@ -725,8 +658,8 @@ class CallFunctionStub: public CodeStub {
virtual void PrintName(StringStream* stream);
// Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
- class FlagBits: public BitField<CallFunctionFlags, 0, 2> {};
- class ArgcBits: public BitField<unsigned, 2, 32 - 2> {};
+ class FlagBits: public BitField<CallFunctionFlags, 0, 1> {};
+ class ArgcBits: public BitField<unsigned, 1, 32 - 1> {};
Major MajorKey() { return CallFunction; }
int MinorKey() {
@@ -737,10 +670,6 @@ class CallFunctionStub: public CodeStub {
bool ReceiverMightBeImplicit() {
return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
}
-
- bool RecordCallTarget() {
- return (flags_ & RECORD_CALL_TARGET) != 0;
- }
};
@@ -1005,8 +934,6 @@ class ToBooleanStub: public CodeStub {
virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
virtual void PrintName(StringStream* stream);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
private:
Major MajorKey() { return ToBoolean; }
int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index ceea7b9fea..cdc9ba1553 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -218,8 +218,8 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
int CEntryStub::MinorKey() {
- int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
ASSERT(result_size_ == 1 || result_size_ == 2);
+ int result = save_doubles_ ? 1 : 0;
#ifdef _WIN64
return result | ((result_size_ == 1) ? 0 : 2);
#else
diff --git a/deps/v8/src/compiler-intrinsics.h b/deps/v8/src/compiler-intrinsics.h
deleted file mode 100644
index 3b9c59ea53..0000000000
--- a/deps/v8/src/compiler-intrinsics.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_COMPILER_INTRINSICS_H_
-#define V8_COMPILER_INTRINSICS_H_
-
-namespace v8 {
-namespace internal {
-
-class CompilerIntrinsics {
- public:
- // Returns number of zero bits preceding least significant 1 bit.
- // Undefined for zero value.
- INLINE(static int CountTrailingZeros(uint32_t value));
-
- // Returns number of zero bits following most significant 1 bit.
- // Undefined for zero value.
- INLINE(static int CountLeadingZeros(uint32_t value));
-};
-
-#ifdef __GNUC__
-int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
- return __builtin_ctz(value);
-}
-
-int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
- return __builtin_clz(value);
-}
-
-#elif defined(_MSC_VER)
-
-#pragma intrinsic(_BitScanForward)
-#pragma intrinsic(_BitScanReverse)
-
-int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
- unsigned long result; //NOLINT
- _BitScanForward(&result, static_cast<long>(value)); //NOLINT
- return static_cast<int>(result);
-}
-
-int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
- unsigned long result; //NOLINT
- _BitScanReverse(&result, static_cast<long>(value)); //NOLINT
- return 31 - static_cast<int>(result);
-}
-
-#else
-#error Unsupported compiler
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_COMPILER_INTRINSICS_H_
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 4979a7f866..5e1c4a9789 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -36,7 +36,6 @@
#include "full-codegen.h"
#include "gdb-jit.h"
#include "hydrogen.h"
-#include "isolate-inl.h"
#include "lithium.h"
#include "liveedit.h"
#include "parser.h"
@@ -276,7 +275,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
Handle<Context> global_context(info->closure()->context()->global_context());
- TypeFeedbackOracle oracle(code, global_context, info->isolate());
+ TypeFeedbackOracle oracle(code, global_context);
HGraphBuilder builder(info, &oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph();
@@ -480,7 +479,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
// that would be compiled lazily anyway, so we skip the preparse step
// in that case too.
ScriptDataImpl* pre_data = input_pre_data;
- bool harmony_scoping = natives != NATIVES_CODE && FLAG_harmony_scoping;
+ bool harmony_block_scoping = natives != NATIVES_CODE &&
+ FLAG_harmony_block_scoping;
if (pre_data == NULL
&& source_length >= FLAG_min_preparse_length) {
if (source->IsExternalTwoByteString()) {
@@ -488,12 +488,12 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
pre_data = ParserApi::PartialPreParse(&stream,
extension,
- harmony_scoping);
+ harmony_block_scoping);
} else {
GenericStringUC16CharacterStream stream(source, 0, source->length());
pre_data = ParserApi::PartialPreParse(&stream,
extension,
- harmony_scoping);
+ harmony_block_scoping);
}
}
@@ -516,9 +516,6 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
- if (natives == NATIVES_CODE) {
- info.MarkAsAllowingNativesSyntax();
- }
result = MakeFunctionInfo(&info);
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, result);
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 09aa23dec9..69ab27d9c8 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -83,12 +83,6 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(is_lazy());
flags_ |= IsInLoop::encode(true);
}
- void MarkAsAllowingNativesSyntax() {
- flags_ |= IsNativesSyntaxAllowed::encode(true);
- }
- bool allows_natives_syntax() const {
- return IsNativesSyntaxAllowed::decode(flags_);
- }
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
@@ -199,8 +193,6 @@ class CompilationInfo BASE_EMBEDDED {
class IsInLoop: public BitField<bool, 3, 1> {};
// Strict mode - used in eager compilation.
class IsStrictMode: public BitField<bool, 4, 1> {};
- // Native syntax (%-stuff) allowed?
- class IsNativesSyntaxAllowed: public BitField<bool, 5, 1> {};
// Is this a function from our natives.
class IsNative: public BitField<bool, 6, 1> {};
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 0cda430492..4f93abdff1 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -86,14 +86,14 @@ void Context::set_global_proxy(JSObject* object) {
Handle<Object> Context::Lookup(Handle<String> name,
ContextLookupFlags flags,
- int* index,
+ int* index_,
PropertyAttributes* attributes,
BindingFlags* binding_flags) {
Isolate* isolate = GetIsolate();
Handle<Context> context(this, isolate);
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
- *index = -1;
+ *index_ = -1;
*attributes = ABSENT;
*binding_flags = MISSING_BINDING;
@@ -110,50 +110,70 @@ Handle<Object> Context::Lookup(Handle<String> name,
PrintF("\n");
}
- // 1. Check global objects, subjects of with, and extension objects.
- if (context->IsGlobalContext() ||
- context->IsWithContext() ||
- (context->IsFunctionContext() && context->has_extension())) {
- Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
- // Context extension objects needs to behave as if they have no
- // prototype. So even if we want to follow prototype chains, we need
- // to only do a local lookup for context extension objects.
- if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
- object->IsJSContextExtensionObject()) {
- *attributes = object->GetLocalPropertyAttribute(*name);
+ // Check extension/with/global object.
+ if (!context->IsBlockContext() && context->has_extension()) {
+ if (context->IsCatchContext()) {
+ // Catch contexts have the variable name in the extension slot.
+ if (name->Equals(String::cast(context->extension()))) {
+ if (FLAG_trace_contexts) {
+ PrintF("=> found in catch context\n");
+ }
+ *index_ = Context::THROWN_OBJECT_INDEX;
+ *attributes = NONE;
+ *binding_flags = MUTABLE_IS_INITIALIZED;
+ return context;
+ }
} else {
- *attributes = object->GetPropertyAttribute(*name);
- }
- if (*attributes != ABSENT) {
- if (FLAG_trace_contexts) {
- PrintF("=> found property in context object %p\n",
- reinterpret_cast<void*>(*object));
+ ASSERT(context->IsGlobalContext() ||
+ context->IsFunctionContext() ||
+ context->IsWithContext());
+ // Global, function, and with contexts may have an object in the
+ // extension slot.
+ Handle<JSObject> extension(JSObject::cast(context->extension()),
+ isolate);
+ // Context extension objects need to behave as if they have no
+ // prototype. So even if we want to follow prototype chains, we
+ // need to only do a local lookup for context extension objects.
+ if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
+ extension->IsJSContextExtensionObject()) {
+ *attributes = extension->GetLocalPropertyAttribute(*name);
+ } else {
+ *attributes = extension->GetPropertyAttribute(*name);
+ }
+ if (*attributes != ABSENT) {
+ // property found
+ if (FLAG_trace_contexts) {
+ PrintF("=> found property in context object %p\n",
+ reinterpret_cast<void*>(*extension));
+ }
+ return extension;
}
- return object;
}
}
- // 2. Check the context proper if it has slots.
+ // Check serialized scope information of functions and blocks. Only
+ // functions can have parameters, and a function name.
if (context->IsFunctionContext() || context->IsBlockContext()) {
- // Use serialized scope information of functions and blocks to search
- // for the context index.
+ // We may have context-local slots. Check locals in the context.
Handle<SerializedScopeInfo> scope_info;
if (context->IsFunctionContext()) {
scope_info = Handle<SerializedScopeInfo>(
context->closure()->shared()->scope_info(), isolate);
} else {
+ ASSERT(context->IsBlockContext());
scope_info = Handle<SerializedScopeInfo>(
SerializedScopeInfo::cast(context->extension()), isolate);
}
- VariableMode mode;
- int slot_index = scope_info->ContextSlotIndex(*name, &mode);
- ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
- if (slot_index >= 0) {
+
+ Variable::Mode mode;
+ int index = scope_info->ContextSlotIndex(*name, &mode);
+ ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
+ if (index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found local in context slot %d (mode = %d)\n",
- slot_index, mode);
+ index, mode);
}
- *index = slot_index;
+ *index_ = index;
// Note: Fixed context slots are statically allocated by the compiler.
// Statically allocated variables always have a statically known mode,
// which is the mode with which they were declared when added to the
@@ -161,23 +181,23 @@ Handle<Object> Context::Lookup(Handle<String> name,
// declared variables that were introduced through declaration nodes)
// must not appear here.
switch (mode) {
- case INTERNAL: // Fall through.
- case VAR:
+ case Variable::INTERNAL: // Fall through.
+ case Variable::VAR:
*attributes = NONE;
*binding_flags = MUTABLE_IS_INITIALIZED;
break;
- case LET:
+ case Variable::LET:
*attributes = NONE;
*binding_flags = MUTABLE_CHECK_INITIALIZED;
break;
- case CONST:
+ case Variable::CONST:
*attributes = READ_ONLY;
*binding_flags = IMMUTABLE_CHECK_INITIALIZED;
break;
- case DYNAMIC:
- case DYNAMIC_GLOBAL:
- case DYNAMIC_LOCAL:
- case TEMPORARY:
+ case Variable::DYNAMIC:
+ case Variable::DYNAMIC_GLOBAL:
+ case Variable::DYNAMIC_LOCAL:
+ case Variable::TEMPORARY:
UNREACHABLE();
break;
}
@@ -186,34 +206,22 @@ Handle<Object> Context::Lookup(Handle<String> name,
// Check the slot corresponding to the intermediate context holding
// only the function name variable.
- if (follow_context_chain && context->IsFunctionContext()) {
- int function_index = scope_info->FunctionContextSlotIndex(*name);
- if (function_index >= 0) {
+ if (follow_context_chain) {
+ int index = scope_info->FunctionContextSlotIndex(*name);
+ if (index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found intermediate function in context slot %d\n",
- function_index);
+ index);
}
- *index = function_index;
+ *index_ = index;
*attributes = READ_ONLY;
*binding_flags = IMMUTABLE_IS_INITIALIZED;
return context;
}
}
-
- } else if (context->IsCatchContext()) {
- // Catch contexts have the variable name in the extension slot.
- if (name->Equals(String::cast(context->extension()))) {
- if (FLAG_trace_contexts) {
- PrintF("=> found in catch context\n");
- }
- *index = Context::THROWN_OBJECT_INDEX;
- *attributes = NONE;
- *binding_flags = MUTABLE_IS_INITIALIZED;
- return context;
- }
}
- // 3. Prepare to continue with the previous (next outermost) context.
+ // Proceed with the previous context.
if (context->IsGlobalContext()) {
follow_context_chain = false;
} else {
@@ -245,7 +253,7 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
// Check non-parameter locals.
Handle<SerializedScopeInfo> scope_info(
context->closure()->shared()->scope_info());
- VariableMode mode;
+ Variable::Mode mode;
int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (index >= 0) return false;
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index b80475f0f7..505f86c8ca 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -134,8 +134,6 @@ enum BindingFlags {
V(MAP_CACHE_INDEX, Object, map_cache) \
V(CONTEXT_DATA_INDEX, Object, data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
- V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
- to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
@@ -254,7 +252,6 @@ class Context: public FixedArray {
OUT_OF_MEMORY_INDEX,
CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
- TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX,
@@ -333,6 +330,12 @@ class Context: public FixedArray {
// Mark the global context with out of memory.
inline void mark_out_of_memory();
+ // The exception holder is the object used as a with object in
+ // the implementation of a catch block.
+ bool is_exception_holder(Object* object) {
+ return IsCatchContext() && extension() == object;
+ }
+
// A global context hold a list of all functions which have been optimized.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
@@ -352,25 +355,29 @@ class Context: public FixedArray {
#undef GLOBAL_CONTEXT_FIELD_ACCESSORS
// Lookup the slot called name, starting with the current context.
- // There are three possibilities:
+ // There are 4 possible outcomes:
+ //
+ // 1) index_ >= 0 && result->IsContext():
+ // most common case, the result is a Context, and index is the
+ // context slot index, and the slot exists.
+ // attributes == READ_ONLY for the function name variable, NONE otherwise.
//
- // 1) result->IsContext():
- // The binding was found in a context. *index is always the
- // non-negative slot index. *attributes is NONE for var and let
- // declarations, READ_ONLY for const declarations (never ABSENT).
+ // 2) index_ >= 0 && result->IsJSObject():
+ // the result is the JSObject arguments object, the index is the parameter
+ // index, i.e., key into the arguments object, and the property exists.
+ // attributes != ABSENT.
//
- // 2) result->IsJSObject():
- // The binding was found as a named property in a context extension
- // object (i.e., was introduced via eval), as a property on the subject
- // of with, or as a property of the global object. *index is -1 and
- // *attributes is not ABSENT.
+ // 3) index_ < 0 && result->IsJSObject():
+ // the result is the JSObject extension context or the global object,
+ // and the name is the property name, and the property exists.
+ // attributes != ABSENT.
//
- // 3) result.is_null():
- // There was no binding found, *index is always -1 and *attributes is
- // always ABSENT.
+ // 4) index_ < 0 && result.is_null():
+ // there was no context found with the corresponding property.
+ // attributes == ABSENT.
Handle<Object> Lookup(Handle<String> name,
ContextLookupFlags flags,
- int* index,
+ int* index_,
PropertyAttributes* attributes,
BindingFlags* binding_flags);
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 8bc11bf83d..41cf0d54c2 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -47,7 +47,7 @@ namespace v8 {
namespace internal {
static inline double JunkStringValue() {
- return BitCast<double, uint64_t>(kQuietNaNMask);
+ return std::numeric_limits<double>::quiet_NaN();
}
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 31aaf6b737..e51ad6501c 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -28,6 +28,8 @@
#ifndef V8_CONVERSIONS_H_
#define V8_CONVERSIONS_H_
+#include <limits>
+
#include "utils.h"
namespace v8 {
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index 8fbc876dab..adefba7322 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifdef ENABLE_DEBUGGER_SUPPORT
#include "d8.h"
#include "d8-debug.h"
@@ -368,5 +367,3 @@ void KeyboardThread::Run() {
} // namespace v8
-
-#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index a516576faf..55f0d4c2ab 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -146,11 +146,11 @@ bool Shell::ExecuteString(Handle<String> source,
Handle<Value> name,
bool print_result,
bool report_exceptions) {
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+#ifndef V8_SHARED
bool FLAG_debugger = i::FLAG_debugger;
#else
bool FLAG_debugger = false;
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+#endif // V8_SHARED
HandleScope handle_scope;
TryCatch try_catch;
options.script_executed = true;
@@ -594,7 +594,6 @@ void Shell::InstallUtilityScript() {
Context::Scope utility_scope(utility_context_);
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
// Install the debugger object in the utility scope
i::Debug* debug = i::Isolate::Current()->debug();
debug->Load();
@@ -817,7 +816,7 @@ void Shell::OnExit() {
static FILE* FOpen(const char* path, const char* mode) {
-#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
+#if (defined(_WIN32) || defined(_WIN64))
FILE* result;
if (fopen_s(&result, path, mode) == 0) {
return result;
@@ -901,6 +900,9 @@ void Shell::RunShell() {
#ifndef V8_SHARED
console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
+ if (i::FLAG_debugger) {
+ printf("JavaScript debugger enabled\n");
+ }
console->Open();
while (true) {
i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
@@ -1251,22 +1253,14 @@ int Shell::RunMain(int argc, char* argv[]) {
Locker lock;
HandleScope scope;
Persistent<Context> context = CreateEvaluationContext();
- if (options.last_run) {
- // Keep using the same context in the interactive shell.
- evaluation_context_ = context;
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- // If the interactive debugger is enabled make sure to activate
- // it before running the files passed on the command line.
- if (i::FLAG_debugger) {
- InstallUtilityScript();
- }
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- }
{
Context::Scope cscope(context);
options.isolate_sources[0].Execute();
}
- if (!options.last_run) {
+ if (options.last_run) {
+ // Keep using the same context in the interactive shell
+ evaluation_context_ = context;
+ } else {
context.Dispose();
}
@@ -1337,11 +1331,9 @@ int Shell::Main(int argc, char* argv[]) {
if (( options.interactive_shell
|| !options.script_executed )
&& !options.test_shell ) {
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- if (!i::FLAG_debugger) {
- InstallUtilityScript();
- }
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+#ifndef V8_SHARED
+ InstallUtilityScript();
+#endif // V8_SHARED
RunShell();
}
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 3d79485b57..a229d39c3e 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -40,7 +40,6 @@
#include "global-handles.h"
#include "ic.h"
#include "ic-inl.h"
-#include "isolate-inl.h"
#include "list.h"
#include "messages.h"
#include "natives.h"
@@ -402,15 +401,15 @@ void BreakLocationIterator::PrepareStepIn() {
// Step in can only be prepared if currently positioned on an IC call,
// construct call or CallFunction stub call.
Address target = rinfo()->target_address();
- Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
- if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) {
+ Handle<Code> code(Code::GetCodeFromTargetAddress(target));
+ if (code->is_call_stub() || code->is_keyed_call_stub()) {
// Step in through IC call is handled by the runtime system. Therefore make
// sure that any current IC is cleared and the runtime system is
// called. If the executing code has a debug break at the location change
// the call in the original code as it is the code there that will be
// executed in place of the debug break call.
- Handle<Code> stub = ComputeCallDebugPrepareStepIn(
- target_code->arguments_count(), target_code->kind());
+ Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
+ code->kind());
if (IsDebugBreak()) {
original_rinfo()->set_target_address(stub->entry());
} else {
@@ -420,7 +419,7 @@ void BreakLocationIterator::PrepareStepIn() {
#ifdef DEBUG
// All the following stuff is needed only for assertion checks so the code
// is wrapped in ifdef.
- Handle<Code> maybe_call_function_stub = target_code;
+ Handle<Code> maybe_call_function_stub = code;
if (IsDebugBreak()) {
Address original_target = original_rinfo()->target_address();
maybe_call_function_stub =
@@ -437,9 +436,8 @@ void BreakLocationIterator::PrepareStepIn() {
// Step in through CallFunction stub should also be prepared by caller of
// this function (Debug::PrepareStep) which should flood target function
// with breakpoints.
- ASSERT(RelocInfo::IsConstructCall(rmode()) ||
- target_code->is_inline_cache_stub() ||
- is_call_function_stub);
+ ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()
+ || is_call_function_stub);
#endif
}
}
@@ -476,11 +474,11 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
RelocInfo::Mode mode = rmode();
if (RelocInfo::IsCodeTarget(mode)) {
Address target = rinfo()->target_address();
- Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
+ Handle<Code> code(Code::GetCodeFromTargetAddress(target));
// Patch the code to invoke the builtin debug break function matching the
// calling convention used by the call site.
- Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode));
+ Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
rinfo()->set_target_address(dbgbrk_code->entry());
}
}
@@ -774,7 +772,7 @@ bool Debug::CompileDebuggerScript(int index) {
// Execute the shared function in the debugger context.
Handle<Context> context = isolate->global_context();
- bool caught_exception;
+ bool caught_exception = false;
Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
@@ -1105,13 +1103,14 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
// Call HandleBreakPointx.
- bool caught_exception;
- Handle<Object> argv[] = { break_id, break_point_object };
+ bool caught_exception = false;
+ const int argc = 2;
+ Object** argv[argc] = {
+ break_id.location(),
+ reinterpret_cast<Object**>(break_point_object.location())
+ };
Handle<Object> result = Execution::TryCall(check_break_point,
- isolate_->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
+ isolate_->js_builtins_object(), argc, argv, &caught_exception);
// If exception or non boolean result handle as not triggered
if (caught_exception || !result->IsBoolean()) {
@@ -1733,10 +1732,6 @@ void Debug::PrepareForBreakPoints() {
if (!has_break_points_) {
Deoptimizer::DeoptimizeAll();
- // We are going to iterate heap to find all functions without
- // debug break slots.
- isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
-
AssertNoAllocation no_allocation;
Builtins* builtins = isolate_->builtins();
Code* lazy_compile = builtins->builtin(Builtins::kLazyCompile);
@@ -2002,10 +1997,9 @@ void Debug::CreateScriptCache() {
// Perform two GCs to get rid of all unreferenced scripts. The first GC gets
// rid of all the cached script wrappers and the second gets rid of the
- // scripts which are no longer referenced. The second also sweeps precisely,
- // which saves us doing yet another GC to make the heap iterable.
- heap->CollectAllGarbage(Heap::kNoGCFlags);
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ // scripts which are no longer referenced.
+ heap->CollectAllGarbage(false);
+ heap->CollectAllGarbage(false);
ASSERT(script_cache_ == NULL);
script_cache_ = new ScriptCache();
@@ -2013,8 +2007,6 @@ void Debug::CreateScriptCache() {
// Scan heap for Script objects.
int count = 0;
HeapIterator iterator;
- AssertNoAllocation no_allocation;
-
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
script_cache_->Add(Handle<Script>(Script::cast(obj)));
@@ -2055,7 +2047,7 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
// Perform GC to get unreferenced scripts evicted from the cache before
// returning the content.
- isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate_->heap()->CollectAllGarbage(false);
// Get the scripts from the cache.
return script_cache_->GetScripts();
@@ -2101,8 +2093,7 @@ Debugger::~Debugger() {
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
- int argc,
- Handle<Object> argv[],
+ int argc, Object*** argv,
bool* caught_exception) {
ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
@@ -2119,9 +2110,7 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
Handle<Object> js_object = Execution::TryCall(
Handle<JSFunction>::cast(constructor),
Handle<JSObject>(isolate_->debug()->debug_context()->global()),
- argc,
- argv,
- caught_exception);
+ argc, argv, caught_exception);
return js_object;
}
@@ -2130,11 +2119,10 @@ Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
// Create the execution state object.
Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
isolate_->debug()->break_id());
- Handle<Object> argv[] = { break_id };
+ const int argc = 1;
+ Object** argv[argc] = { break_id.location() };
return MakeJSObject(CStrVector("MakeExecutionState"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
+ argc, argv, caught_exception);
}
@@ -2142,9 +2130,11 @@ Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
Handle<Object> break_points_hit,
bool* caught_exception) {
// Create the new break event object.
- Handle<Object> argv[] = { exec_state, break_points_hit };
+ const int argc = 2;
+ Object** argv[argc] = { exec_state.location(),
+ break_points_hit.location() };
return MakeJSObject(CStrVector("MakeBreakEvent"),
- ARRAY_SIZE(argv),
+ argc,
argv,
caught_exception);
}
@@ -2156,24 +2146,23 @@ Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state,
bool* caught_exception) {
Factory* factory = isolate_->factory();
// Create the new exception event object.
- Handle<Object> argv[] = { exec_state,
- exception,
- factory->ToBoolean(uncaught) };
+ const int argc = 3;
+ Object** argv[argc] = { exec_state.location(),
+ exception.location(),
+ uncaught ? factory->true_value().location() :
+ factory->false_value().location()};
return MakeJSObject(CStrVector("MakeExceptionEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
+ argc, argv, caught_exception);
}
Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
bool* caught_exception) {
// Create the new function event object.
- Handle<Object> argv[] = { function };
+ const int argc = 1;
+ Object** argv[argc] = { function.location() };
return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
- ARRAY_SIZE(argv),
- argv,
- caught_exception);
+ argc, argv, caught_exception);
}
@@ -2184,11 +2173,14 @@ Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
// Create the compile event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> script_wrapper = GetScriptWrapper(script);
- Handle<Object> argv[] = { exec_state,
- script_wrapper,
- factory->ToBoolean(before) };
+ const int argc = 3;
+ Object** argv[argc] = { exec_state.location(),
+ script_wrapper.location(),
+ before ? factory->true_value().location() :
+ factory->false_value().location() };
+
return MakeJSObject(CStrVector("MakeCompileEvent"),
- ARRAY_SIZE(argv),
+ argc,
argv,
caught_exception);
}
@@ -2199,10 +2191,11 @@ Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
// Create the script collected event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
- Handle<Object> argv[] = { exec_state, id_object };
+ const int argc = 2;
+ Object** argv[argc] = { exec_state.location(), id_object.location() };
return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
- ARRAY_SIZE(argv),
+ argc,
argv,
caught_exception);
}
@@ -2352,13 +2345,12 @@ void Debugger::OnAfterCompile(Handle<Script> script,
Handle<JSValue> wrapper = GetScriptWrapper(script);
// Call UpdateScriptBreakPoints expect no exceptions.
- bool caught_exception;
- Handle<Object> argv[] = { wrapper };
+ bool caught_exception = false;
+ const int argc = 1;
+ Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
- Isolate::Current()->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
+ Isolate::Current()->js_builtins_object(), argc, argv,
+ &caught_exception);
if (caught_exception) {
return;
}
@@ -2489,16 +2481,13 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
// Invoke the JavaScript debug event listener.
- Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)),
- exec_state,
- event_data,
- event_listener_data_ };
- bool caught_exception;
- Execution::TryCall(fun,
- isolate_->global(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
+ const int argc = 4;
+ Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
+ exec_state.location(),
+ Handle<Object>::cast(event_data).location(),
+ event_listener_data_.location() };
+ bool caught_exception = false;
+ Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
// Silently ignore exceptions from debug event listeners.
}
@@ -2867,11 +2856,12 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
return isolate_->factory()->undefined_value();
}
- Handle<Object> argv[] = { exec_state, data };
+ static const int kArgc = 2;
+ Object** argv[kArgc] = { exec_state.location(), data.location() };
Handle<Object> result = Execution::Call(
fun,
Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
- ARRAY_SIZE(argv),
+ kArgc,
argv,
pending_exception);
return result;
@@ -2939,94 +2929,6 @@ void Debugger::CallMessageDispatchHandler() {
}
-EnterDebugger::EnterDebugger()
- : isolate_(Isolate::Current()),
- prev_(isolate_->debug()->debugger_entry()),
- it_(isolate_),
- has_js_frames_(!it_.done()),
- save_(isolate_) {
- Debug* debug = isolate_->debug();
- ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
- ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
-
- // Link recursive debugger entry.
- debug->set_debugger_entry(this);
-
- // Store the previous break id and frame id.
- break_id_ = debug->break_id();
- break_frame_id_ = debug->break_frame_id();
-
- // Create the new break info. If there is no JavaScript frames there is no
- // break frame id.
- if (has_js_frames_) {
- debug->NewBreak(it_.frame()->id());
- } else {
- debug->NewBreak(StackFrame::NO_ID);
- }
-
- // Make sure that debugger is loaded and enter the debugger context.
- load_failed_ = !debug->Load();
- if (!load_failed_) {
- // NOTE the member variable save which saves the previous context before
- // this change.
- isolate_->set_context(*debug->debug_context());
- }
-}
-
-
-EnterDebugger::~EnterDebugger() {
- ASSERT(Isolate::Current() == isolate_);
- Debug* debug = isolate_->debug();
-
- // Restore to the previous break state.
- debug->SetBreak(break_frame_id_, break_id_);
-
- // Check for leaving the debugger.
- if (prev_ == NULL) {
- // Clear mirror cache when leaving the debugger. Skip this if there is a
- // pending exception as clearing the mirror cache calls back into
- // JavaScript. This can happen if the v8::Debug::Call is used in which
- // case the exception should end up in the calling code.
- if (!isolate_->has_pending_exception()) {
- // Try to avoid any pending debug break breaking in the clear mirror
- // cache JavaScript code.
- if (isolate_->stack_guard()->IsDebugBreak()) {
- debug->set_interrupts_pending(DEBUGBREAK);
- isolate_->stack_guard()->Continue(DEBUGBREAK);
- }
- debug->ClearMirrorCache();
- }
-
- // Request preemption and debug break when leaving the last debugger entry
- // if any of these where recorded while debugging.
- if (debug->is_interrupt_pending(PREEMPT)) {
- // This re-scheduling of preemption is to avoid starvation in some
- // debugging scenarios.
- debug->clear_interrupt_pending(PREEMPT);
- isolate_->stack_guard()->Preempt();
- }
- if (debug->is_interrupt_pending(DEBUGBREAK)) {
- debug->clear_interrupt_pending(DEBUGBREAK);
- isolate_->stack_guard()->DebugBreak();
- }
-
- // If there are commands in the queue when leaving the debugger request
- // that these commands are processed.
- if (isolate_->debugger()->HasCommands()) {
- isolate_->stack_guard()->DebugCommand();
- }
-
- // If leaving the debugger with the debugger no longer active unload it.
- if (!isolate_->debugger()->IsDebuggerActive()) {
- isolate_->debugger()->UnloadDebugger();
- }
- }
-
- // Leaving this debugger entry.
- debug->set_debugger_entry(prev_);
-}
-
-
MessageImpl MessageImpl::NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
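
A pattern worth calling out in the debug.cc hunks above: the downgraded code passes arguments as an explicit array of Object** slots (built from Handle::location()) plus an argc count, where the newer code passed Handle<Object> argv[] with ARRAY_SIZE. The sketch below illustrates that location-array convention in isolation; Object and Handle here are hypothetical stand-ins, not the real v8::internal types.

// Editorial sketch only -- hypothetical stand-ins, not V8 internals.
#include <cstdio>

struct Object { int value; };

template <typename T>
class Handle {
 public:
  explicit Handle(T* object) : slot_(object) {}
  // location() exposes the address of the slot holding the pointer, i.e. a T**.
  T** location() { return &slot_; }
 private:
  T* slot_;
};

// 3.6.4-style callee: a count plus an array of Object** slots (an Object***).
static int SumArgs(int argc, Object** argv[]) {
  int sum = 0;
  for (int i = 0; i < argc; ++i) sum += (*argv[i])->value;
  return sum;
}

int main() {
  Object a = { 1 };
  Object b = { 2 };
  Handle<Object> ha(&a);
  Handle<Object> hb(&b);
  const int argc = 2;
  Object** argv[argc] = { ha.location(), hb.location() };  // as in the hunks above
  std::printf("sum = %d\n", SumArgs(argc, argv));
  return 0;
}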
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index f01ef393f8..a098040c0d 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -705,8 +705,7 @@ class Debugger {
void DebugRequest(const uint16_t* json_request, int length);
Handle<Object> MakeJSObject(Vector<const char> constructor_name,
- int argc,
- Handle<Object> argv[],
+ int argc, Object*** argv,
bool* caught_exception);
Handle<Object> MakeExecutionState(bool* caught_exception);
Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
@@ -870,8 +869,91 @@ class Debugger {
// some reason could not be entered FailedToEnter will return true.
class EnterDebugger BASE_EMBEDDED {
public:
- EnterDebugger();
- ~EnterDebugger();
+ EnterDebugger()
+ : isolate_(Isolate::Current()),
+ prev_(isolate_->debug()->debugger_entry()),
+ it_(isolate_),
+ has_js_frames_(!it_.done()),
+ save_(isolate_) {
+ Debug* debug = isolate_->debug();
+ ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
+ ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
+
+ // Link recursive debugger entry.
+ debug->set_debugger_entry(this);
+
+ // Store the previous break id and frame id.
+ break_id_ = debug->break_id();
+ break_frame_id_ = debug->break_frame_id();
+
+    // Create the new break info. If there are no JavaScript frames, there is
+    // no break frame id.
+ if (has_js_frames_) {
+ debug->NewBreak(it_.frame()->id());
+ } else {
+ debug->NewBreak(StackFrame::NO_ID);
+ }
+
+ // Make sure that debugger is loaded and enter the debugger context.
+ load_failed_ = !debug->Load();
+ if (!load_failed_) {
+      // NOTE: the member variable save_ saves the previous context before
+      // this change.
+ isolate_->set_context(*debug->debug_context());
+ }
+ }
+
+ ~EnterDebugger() {
+ ASSERT(Isolate::Current() == isolate_);
+ Debug* debug = isolate_->debug();
+
+ // Restore to the previous break state.
+ debug->SetBreak(break_frame_id_, break_id_);
+
+ // Check for leaving the debugger.
+ if (prev_ == NULL) {
+ // Clear mirror cache when leaving the debugger. Skip this if there is a
+ // pending exception as clearing the mirror cache calls back into
+ // JavaScript. This can happen if the v8::Debug::Call is used in which
+ // case the exception should end up in the calling code.
+ if (!isolate_->has_pending_exception()) {
+ // Try to avoid any pending debug break breaking in the clear mirror
+ // cache JavaScript code.
+ if (isolate_->stack_guard()->IsDebugBreak()) {
+ debug->set_interrupts_pending(DEBUGBREAK);
+ isolate_->stack_guard()->Continue(DEBUGBREAK);
+ }
+ debug->ClearMirrorCache();
+ }
+
+ // Request preemption and debug break when leaving the last debugger entry
+      // if any of these were recorded while debugging.
+ if (debug->is_interrupt_pending(PREEMPT)) {
+ // This re-scheduling of preemption is to avoid starvation in some
+ // debugging scenarios.
+ debug->clear_interrupt_pending(PREEMPT);
+ isolate_->stack_guard()->Preempt();
+ }
+ if (debug->is_interrupt_pending(DEBUGBREAK)) {
+ debug->clear_interrupt_pending(DEBUGBREAK);
+ isolate_->stack_guard()->DebugBreak();
+ }
+
+ // If there are commands in the queue when leaving the debugger request
+ // that these commands are processed.
+ if (isolate_->debugger()->HasCommands()) {
+ isolate_->stack_guard()->DebugCommand();
+ }
+
+ // If leaving the debugger with the debugger no longer active unload it.
+ if (!isolate_->debugger()->IsDebuggerActive()) {
+ isolate_->debugger()->UnloadDebugger();
+ }
+ }
+
+ // Leaving this debugger entry.
+ debug->set_debugger_entry(prev_);
+ }
// Check whether the debugger could be entered.
inline bool FailedToEnter() { return load_failed_; }
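
The EnterDebugger body that this hunk moves into the header is an RAII scope guard: the constructor links the new (possibly nested) entry and records the previous break state, and the destructor restores that state and performs the leave-debugger work only when the outermost entry unwinds. A stripped-down sketch of that shape, with hypothetical types standing in for the V8 ones:

// Editorial sketch of the EnterDebugger RAII shape; all names are hypothetical.
#include <cassert>
#include <cstdio>

struct DebugState {
  int break_id = 0;
  void* entry = nullptr;  // currently active debugger entry, if any
};

class DebuggerScope {
 public:
  explicit DebuggerScope(DebugState* state)
      : state_(state),
        prev_entry_(state->entry),
        saved_break_id_(state->break_id) {
    state_->entry = this;   // link this (possibly nested) entry
    state_->break_id += 1;  // stand-in for creating a new break id
  }
  ~DebuggerScope() {
    state_->break_id = saved_break_id_;  // restore the previous break state
    if (prev_entry_ == nullptr) {
      // Outermost entry: this is where the pending-interrupt work would run.
      std::printf("left outermost debugger entry\n");
    }
    state_->entry = prev_entry_;  // unlink
  }
 private:
  DebugState* state_;
  void* prev_entry_;
  int saved_break_id_;
};

int main() {
  DebugState state;
  {
    DebuggerScope outer(&state);
    { DebuggerScope nested(&state); }  // nests and unwinds cleanly
  }
  assert(state.entry == nullptr && state.break_id == 0);
  return 0;
}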
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index b0522757eb..5feb73d739 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -52,13 +52,11 @@ DeoptimizerData::DeoptimizerData() {
DeoptimizerData::~DeoptimizerData() {
if (eager_deoptimization_entry_code_ != NULL) {
- Isolate::Current()->memory_allocator()->Free(
- eager_deoptimization_entry_code_);
+ eager_deoptimization_entry_code_->Free(EXECUTABLE);
eager_deoptimization_entry_code_ = NULL;
}
if (lazy_deoptimization_entry_code_ != NULL) {
- Isolate::Current()->memory_allocator()->Free(
- lazy_deoptimization_entry_code_);
+ lazy_deoptimization_entry_code_->Free(EXECUTABLE);
lazy_deoptimization_entry_code_ = NULL;
}
}
@@ -73,8 +71,6 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) {
#endif
-// We rely on this function not causing a GC. It is called from generated code
-// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
@@ -323,8 +319,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
input_(NULL),
output_count_(0),
output_(NULL),
- frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
- has_alignment_padding_(0),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
if (type == DEBUGGER) {
@@ -349,26 +343,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
if (type == EAGER) {
ASSERT(from == NULL);
optimized_code_ = function_->code();
- if (FLAG_trace_deopt && FLAG_code_comments) {
- // Print instruction associated with this bailout.
- const char* last_comment = NULL;
- int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
- | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->rmode() == RelocInfo::COMMENT) {
- last_comment = reinterpret_cast<const char*>(info->data());
- }
- if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
- unsigned id = Deoptimizer::GetDeoptimizationId(
- info->target_address(), Deoptimizer::EAGER);
- if (id == bailout_id && last_comment != NULL) {
- PrintF(" %s\n", last_comment);
- break;
- }
- }
- }
- }
} else if (type == LAZY) {
optimized_code_ = FindDeoptimizingCodeFromAddress(from);
ASSERT(optimized_code_ != NULL);
@@ -412,7 +386,7 @@ void Deoptimizer::DeleteFrameDescriptions() {
Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
ASSERT(id >= 0);
if (id >= kNumberOfEntries) return NULL;
- MemoryChunk* base = NULL;
+ LargeObjectChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
if (data->eager_deoptimization_entry_code_ == NULL) {
@@ -426,12 +400,12 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
return
- static_cast<Address>(base->body()) + (id * table_entry_size_);
+ static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
}
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
- MemoryChunk* base = NULL;
+ LargeObjectChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
base = data->eager_deoptimization_entry_code_;
@@ -439,14 +413,14 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
if (base == NULL ||
- addr < base->body() ||
- addr >= base->body() +
+ addr < base->GetStartAddress() ||
+ addr >= base->GetStartAddress() +
(kNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
- static_cast<int>(addr - base->body()) % table_entry_size_);
- return static_cast<int>(addr - base->body()) / table_entry_size_;
+ static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
+ return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
}
@@ -488,8 +462,6 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
}
-// We rely on this function not causing a GC. It is called from generated code
-// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
if (bailout_type_ == OSR) {
DoComputeOsrOutputFrame();
@@ -641,13 +613,11 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
intptr_t input_value = input_->GetRegister(input_reg);
if (FLAG_trace_deopt) {
PrintF(
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
input_value,
converter.NameOfCPURegister(input_reg));
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@@ -705,12 +675,10 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
+ PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
output_offset,
input_value,
input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@@ -985,10 +953,7 @@ void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
for (uint32_t i = 0; i < table_length; ++i) {
uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- PatchStackCheckCodeAt(unoptimized_code,
- pc_after,
- check_code,
- replacement_code);
+ PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
stack_check_cursor += 2 * kIntSize;
}
}
@@ -1074,7 +1039,7 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address,
}
-MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
+LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
// We cannot run this if the serializer is enabled because this will
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
@@ -1088,15 +1053,12 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
- MemoryChunk* chunk =
- Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
- EXECUTABLE,
- NULL);
+ LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
if (chunk == NULL) {
V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
}
- memcpy(chunk->body(), desc.buffer, desc.instr_size);
- CPU::FlushICache(chunk->body(), desc.instr_size);
+ memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
+ CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
return chunk;
}
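
GetDeoptimizationEntry and GetDeoptimizationId above translate between an entry id and an address inside one flat block of equally sized stubs (now a LargeObjectChunk rather than a MemoryChunk), so both directions are simple arithmetic on the chunk's start address. A self-contained sketch of that mapping; the entry count and stub size are arbitrary illustration values, not the real table_entry_size_:

// Editorial sketch of the deoptimization-entry address arithmetic.
#include <cassert>
#include <cstdint>

using Address = uint8_t*;

constexpr int kNumberOfEntries = 4096;  // illustrative table size
constexpr int kTableEntrySize = 10;     // illustrative per-entry stub size

Address GetEntry(Address base, int id) {
  assert(id >= 0 && id < kNumberOfEntries);
  return base + id * kTableEntrySize;
}

int GetId(Address base, Address addr) {
  if (base == nullptr || addr < base ||
      addr >= base + kNumberOfEntries * kTableEntrySize) {
    return -1;  // stand-in for kNotDeoptimizationEntry
  }
  assert((addr - base) % kTableEntrySize == 0);
  return static_cast<int>((addr - base) / kTableEntrySize);
}

int main() {
  static uint8_t table[kNumberOfEntries * kTableEntrySize];
  Address base = table;
  assert(GetId(base, GetEntry(base, 17)) == 17);
  return 0;
}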
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 3cf70466c0..8641261b17 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -86,8 +86,8 @@ class DeoptimizerData {
#endif
private:
- MemoryChunk* eager_deoptimization_entry_code_;
- MemoryChunk* lazy_deoptimization_entry_code_;
+ LargeObjectChunk* eager_deoptimization_entry_code_;
+ LargeObjectChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -173,8 +173,7 @@ class Deoptimizer : public Malloced {
// Patch stack guard check at instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
- static void PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
+ static void PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code);
@@ -212,11 +211,6 @@ class Deoptimizer : public Malloced {
return OFFSET_OF(Deoptimizer, output_count_);
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
- static int frame_alignment_marker_offset() {
- return OFFSET_OF(Deoptimizer, frame_alignment_marker_); }
- static int has_alignment_padding_offset() {
- return OFFSET_OF(Deoptimizer, has_alignment_padding_);
- }
static int GetDeoptimizedCodeCount(Isolate* isolate);
@@ -291,7 +285,7 @@ class Deoptimizer : public Malloced {
void AddDoubleValue(intptr_t slot_address, double value);
- static MemoryChunk* CreateCode(BailoutType type);
+ static LargeObjectChunk* CreateCode(BailoutType type);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@@ -321,10 +315,6 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
- // Frames can be dynamically padded on ia32 to align untagged doubles.
- Object* frame_alignment_marker_;
- intptr_t has_alignment_padding_;
-
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
static const int table_entry_size_;
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index e3b40ab93f..1e67b4cb66 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -200,7 +200,7 @@ static int DecodeIt(FILE* f,
// Print all the reloc info for this instruction which are not comments.
for (int i = 0; i < pcs.length(); i++) {
// Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
// Indent the printing of the reloc info.
if (i == 0) {
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 5e7a84e38b..e4ecfe8dd6 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -227,9 +227,7 @@ class FastElementsAccessor
public:
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key) {
- ASSERT(obj->HasFastElements() ||
- obj->HasFastSmiOnlyElements() ||
- obj->HasFastArgumentsElements());
+ ASSERT(obj->HasFastElements() || obj->HasFastArgumentsElements());
Heap* heap = obj->GetHeap();
FixedArray* backing_store = FixedArray::cast(obj->elements());
if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
@@ -598,9 +596,6 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
void ElementsAccessor::InitializeOncePerProcess() {
static struct ConcreteElementsAccessors {
- // Use the fast element handler for smi-only arrays. The implementation is
- // currently identical.
- FastElementsAccessor fast_smi_elements_handler;
FastElementsAccessor fast_elements_handler;
FastDoubleElementsAccessor fast_double_elements_handler;
DictionaryElementsAccessor dictionary_elements_handler;
@@ -617,7 +612,6 @@ void ElementsAccessor::InitializeOncePerProcess() {
} element_accessors;
static ElementsAccessor* accessor_array[] = {
- &element_accessors.fast_smi_elements_handler,
&element_accessors.fast_elements_handler,
&element_accessors.fast_double_elements_handler,
&element_accessors.dictionary_elements_handler,
@@ -633,9 +627,6 @@ void ElementsAccessor::InitializeOncePerProcess() {
&element_accessors.pixel_elements_handler
};
- STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
- kElementsKindCount);
-
elements_accessors_ = accessor_array;
}
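
The InitializeOncePerProcess hunk above trims a dispatch table: one statically allocated accessor object per elements kind, collected into an array that is indexed by kind at lookup time. A minimal sketch of that table-of-singletons pattern, with hypothetical kinds and accessors rather than the real ElementsAccessor hierarchy:

// Editorial sketch of a per-kind accessor dispatch table; names are hypothetical.
#include <cstdio>

enum ElementsKind { FAST_ELEMENTS, DICTIONARY_ELEMENTS, KIND_COUNT };

struct ElementsAccessor {
  virtual ~ElementsAccessor() = default;
  virtual const char* Name() const = 0;
};

struct FastAccessor : ElementsAccessor {
  const char* Name() const override { return "fast"; }
};

struct DictionaryAccessor : ElementsAccessor {
  const char* Name() const override { return "dictionary"; }
};

static ElementsAccessor** elements_accessors = nullptr;

static void InitializeOncePerProcess() {
  static FastAccessor fast;               // one long-lived instance per kind
  static DictionaryAccessor dictionary;
  static ElementsAccessor* table[KIND_COUNT] = { &fast, &dictionary };
  elements_accessors = table;
}

int main() {
  InitializeOncePerProcess();
  std::printf("%s\n", elements_accessors[DICTIONARY_ELEMENTS]->Name());
  return 0;
}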
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 29955faff1..f36d4e4911 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -33,7 +33,6 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
-#include "isolate-inl.h"
#include "runtime-profiler.h"
#include "simulator.h"
#include "v8threads.h"
@@ -66,13 +65,13 @@ void StackGuard::reset_limits(const ExecutionAccess& lock) {
}
-static Handle<Object> Invoke(bool is_construct,
- Handle<JSFunction> function,
+static Handle<Object> Invoke(bool construct,
+ Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
- Handle<Object> args[],
+ Object*** args,
bool* has_pending_exception) {
- Isolate* isolate = function->GetIsolate();
+ Isolate* isolate = func->GetIsolate();
// Entering JavaScript.
VMState state(isolate, JS);
@@ -80,15 +79,21 @@ static Handle<Object> Invoke(bool is_construct,
// Placeholder for return value.
MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
- typedef Object* (*JSEntryFunction)(byte* entry,
- Object* function,
- Object* receiver,
- int argc,
- Object*** args);
-
- Handle<Code> code = is_construct
- ? isolate->factory()->js_construct_entry_code()
- : isolate->factory()->js_entry_code();
+ typedef Object* (*JSEntryFunction)(
+ byte* entry,
+ Object* function,
+ Object* receiver,
+ int argc,
+ Object*** args);
+
+ Handle<Code> code;
+ if (construct) {
+ JSConstructEntryStub stub;
+ code = stub.GetCode();
+ } else {
+ JSEntryStub stub;
+ code = stub.GetCode();
+ }
// Convert calls on global objects to be calls on the global
// receiver instead to avoid having a 'this' pointer which refers
@@ -100,22 +105,21 @@ static Handle<Object> Invoke(bool is_construct,
// Make sure that the global object of the context we're about to
// make the current one is indeed a global object.
- ASSERT(function->context()->global()->IsGlobalObject());
+ ASSERT(func->context()->global()->IsGlobalObject());
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
NoHandleAllocation na;
- JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
+ JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
- byte* function_entry = function->code()->entry();
- JSFunction* func = *function;
- Object* recv = *receiver;
- Object*** argv = reinterpret_cast<Object***>(args);
- value =
- CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
+ byte* entry_address = func->code()->entry();
+ JSFunction* function = *func;
+ Object* receiver_pointer = *receiver;
+ value = CALL_GENERATED_CODE(entry, entry_address, function,
+ receiver_pointer, argc, args);
}
#ifdef DEBUG
@@ -144,11 +148,9 @@ static Handle<Object> Invoke(bool is_construct,
Handle<Object> Execution::Call(Handle<Object> callable,
Handle<Object> receiver,
int argc,
- Handle<Object> argv[],
+ Object*** args,
bool* pending_exception,
bool convert_receiver) {
- *pending_exception = false;
-
if (!callable->IsJSFunction()) {
callable = TryGetFunctionDelegate(callable, pending_exception);
if (*pending_exception) return callable;
@@ -170,15 +172,13 @@ Handle<Object> Execution::Call(Handle<Object> callable,
if (*pending_exception) return callable;
}
- return Invoke(false, func, receiver, argc, argv, pending_exception);
+ return Invoke(false, func, receiver, argc, args, pending_exception);
}
-Handle<Object> Execution::New(Handle<JSFunction> func,
- int argc,
- Handle<Object> argv[],
- bool* pending_exception) {
- return Invoke(true, func, Isolate::Current()->global(), argc, argv,
+Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
+ Object*** args, bool* pending_exception) {
+ return Invoke(true, func, Isolate::Current()->global(), argc, args,
pending_exception);
}
@@ -186,7 +186,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func,
Handle<Object> Execution::TryCall(Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
- Handle<Object> args[],
+ Object*** args,
bool* caught_exception) {
// Enter a try-block while executing the JavaScript code. To avoid
// duplicate error printing it must be non-verbose. Also, to avoid
@@ -195,7 +195,6 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
v8::TryCatch catcher;
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
- *caught_exception = false;
Handle<Object> result = Invoke(false, func, receiver, argc, args,
caught_exception);
@@ -378,7 +377,7 @@ void StackGuard::DisableInterrupts() {
bool StackGuard::IsInterrupted() {
ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
+ return thread_local_.interrupt_flags_ & INTERRUPT;
}
@@ -404,7 +403,7 @@ void StackGuard::Preempt() {
bool StackGuard::IsTerminateExecution() {
ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & TERMINATE) != 0;
+ return thread_local_.interrupt_flags_ & TERMINATE;
}
@@ -417,7 +416,7 @@ void StackGuard::TerminateExecution() {
bool StackGuard::IsRuntimeProfilerTick() {
ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
+ return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
}
@@ -434,22 +433,6 @@ void StackGuard::RequestRuntimeProfilerTick() {
}
-bool StackGuard::IsGCRequest() {
- ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
-}
-
-
-void StackGuard::RequestGC() {
- ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= GC_REQUEST;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
- }
-}
-
-
#ifdef ENABLE_DEBUGGER_SUPPORT
bool StackGuard::IsDebugBreak() {
ExecutionAccess access(isolate_);
@@ -572,15 +555,14 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
// --- C a l l s t o n a t i v e s ---
-#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \
- do { \
- Isolate* isolate = Isolate::Current(); \
- Handle<Object> argv[] = args; \
- ASSERT(has_pending_exception != NULL); \
- return Call(isolate->name##_fun(), \
- isolate->js_builtins_object(), \
- ARRAY_SIZE(argv), argv, \
- has_pending_exception); \
+#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
+ do { \
+ Isolate* isolate = Isolate::Current(); \
+ Object** args[argc] = argv; \
+ ASSERT(has_pending_exception != NULL); \
+ return Call(isolate->name##_fun(), \
+ isolate->js_builtins_object(), argc, args, \
+ has_pending_exception); \
} while (false)
@@ -601,44 +583,44 @@ Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_number, { obj }, exc);
+ RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_string, { obj }, exc);
+ RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
+ RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
if (obj->IsSpecObject()) return obj;
- RETURN_NATIVE_CALL(to_object, { obj }, exc);
+ RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_integer, { obj }, exc);
+ RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
+ RETURN_NATIVE_CALL(to_uint32, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
- RETURN_NATIVE_CALL(to_int32, { obj }, exc);
+ RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc);
}
Handle<Object> Execution::NewDate(double time, bool* exc) {
Handle<Object> time_obj = FACTORY->NewNumber(time);
- RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
+ RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
}
@@ -675,7 +657,7 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
bool caught_exception;
Handle<Object> index_object = factory->NewNumberFromInt(int_index);
- Handle<Object> index_arg[] = { index_object };
+ Object** index_arg[] = { index_object.location() };
Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
string,
ARRAY_SIZE(index_arg),
@@ -689,8 +671,7 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
Handle<JSFunction> Execution::InstantiateFunction(
- Handle<FunctionTemplateInfo> data,
- bool* exc) {
+ Handle<FunctionTemplateInfo> data, bool* exc) {
Isolate* isolate = data->GetIsolate();
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
@@ -699,12 +680,10 @@ Handle<JSFunction> Execution::InstantiateFunction(
GetElementNoExceptionThrown(serial_number);
if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
// The function has not yet been instantiated in this context; do it.
- Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate->instantiate_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- exc);
+ Object** args[1] = { Handle<Object>::cast(data).location() };
+ Handle<Object> result =
+ Call(isolate->instantiate_fun(),
+ isolate->js_builtins_object(), 1, args, exc);
if (*exc) return Handle<JSFunction>::null();
return Handle<JSFunction>::cast(result);
}
@@ -731,12 +710,10 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
ASSERT(!*exc);
return Handle<JSObject>(JSObject::cast(result));
} else {
- Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate->instantiate_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- exc);
+ Object** args[1] = { Handle<Object>::cast(data).location() };
+ Handle<Object> result =
+ Call(isolate->instantiate_fun(),
+ isolate->js_builtins_object(), 1, args, exc);
if (*exc) return Handle<JSObject>::null();
return Handle<JSObject>::cast(result);
}
@@ -747,12 +724,9 @@ void Execution::ConfigureInstance(Handle<Object> instance,
Handle<Object> instance_template,
bool* exc) {
Isolate* isolate = Isolate::Current();
- Handle<Object> args[] = { instance, instance_template };
+ Object** args[2] = { instance.location(), instance_template.location() };
Execution::Call(isolate->configure_instance_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- exc);
+ isolate->js_builtins_object(), 2, args, exc);
}
@@ -761,13 +735,16 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Handle<Object> pos,
Handle<Object> is_global) {
Isolate* isolate = fun->GetIsolate();
- Handle<Object> args[] = { recv, fun, pos, is_global };
- bool caught_exception;
- Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(),
- isolate->js_builtins_object(),
- ARRAY_SIZE(args),
- args,
- &caught_exception);
+ const int argc = 4;
+ Object** args[argc] = { recv.location(),
+ Handle<Object>::cast(fun).location(),
+ pos.location(),
+ is_global.location() };
+ bool caught_exception = false;
+ Handle<Object> result =
+ TryCall(isolate->get_stack_trace_line_fun(),
+ isolate->js_builtins_object(), argc, args,
+ &caught_exception);
if (caught_exception || !result->IsString()) {
return isolate->factory()->empty_symbol();
}
@@ -875,12 +852,6 @@ void Execution::ProcessDebugMesssages(bool debug_command_only) {
MaybeObject* Execution::HandleStackGuardInterrupt() {
Isolate* isolate = Isolate::Current();
StackGuard* stack_guard = isolate->stack_guard();
-
- if (stack_guard->IsGCRequest()) {
- isolate->heap()->CollectAllGarbage(false);
- stack_guard->Continue(GC_REQUEST);
- }
-
isolate->counters()->stack_interrupts()->Increment();
if (stack_guard->IsRuntimeProfilerTick()) {
isolate->counters()->runtime_profiler_ticks()->Increment();
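
The reworked RETURN_NATIVE_CALL macro above now takes an explicit argc plus a braced argv list and materializes it into a local Object** array before forwarding the call. The sketch below shows that macro shape with a hypothetical callee; note that braces do not hide commas from the preprocessor, which is consistent with the call sites shown above passing single-element lists.

// Editorial sketch of a RETURN_NATIVE_CALL-style macro; the callee is hypothetical.
#include <cstdio>

static int CallBuiltin(const char* name, int argc, int* argv[]) {
  int sum = 0;
  for (int i = 0; i < argc; ++i) sum += *argv[i];
  std::printf("%s called with %d arg(s)\n", name, argc);
  return sum;
}

// Materialize the braced list into a local array, then forward it.
#define RETURN_NATIVE_CALL(name, argc, argv)  \
  do {                                        \
    int* args[argc] = argv;                   \
    return CallBuiltin(#name, argc, args);    \
  } while (false)

static int ToNumberLike(int* value_slot) {
  RETURN_NATIVE_CALL(to_number, 1, { value_slot });
}

int main() {
  int v = 42;
  std::printf("result = %d\n", ToNumberLike(&v));
  return 0;
}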
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index f2d17d0792..5cd7141fc2 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -41,8 +41,7 @@ enum InterruptFlag {
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
- RUNTIME_PROFILER_TICK = 1 << 5,
- GC_REQUEST = 1 << 6
+ RUNTIME_PROFILER_TICK = 1 << 5
};
class Execution : public AllStatic {
@@ -61,7 +60,7 @@ class Execution : public AllStatic {
static Handle<Object> Call(Handle<Object> callable,
Handle<Object> receiver,
int argc,
- Handle<Object> argv[],
+ Object*** args,
bool* pending_exception,
bool convert_receiver = false);
@@ -74,7 +73,7 @@ class Execution : public AllStatic {
//
static Handle<Object> New(Handle<JSFunction> func,
int argc,
- Handle<Object> argv[],
+ Object*** args,
bool* pending_exception);
// Call a function, just like Call(), but make sure to silently catch
@@ -84,7 +83,7 @@ class Execution : public AllStatic {
static Handle<Object> TryCall(Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
- Handle<Object> argv[],
+ Object*** args,
bool* caught_exception);
// ECMA-262 9.2
@@ -197,8 +196,6 @@ class StackGuard {
bool IsDebugCommand();
void DebugCommand();
#endif
- bool IsGCRequest();
- void RequestGC();
void Continue(InterruptFlag after_what);
// This provides an asynchronous read of the stack limits for the current
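
InterruptFlag above is a bitmask: each interrupt is a distinct power of two OR-ed into interrupt_flags_, tested with a bitwise AND, and cleared by Continue(). The execution.cc hunks also revert the Is* predicates to returning the raw masked value and relying on int-to-bool conversion instead of comparing against zero explicitly. A small self-contained sketch of the pattern with a hypothetical flag set:

// Editorial sketch of bitmask interrupt flags; the flag set is hypothetical.
#include <cassert>

enum InterruptFlag {
  INTERRUPT = 1 << 0,
  DEBUGBREAK = 1 << 1,
  PREEMPT = 1 << 2
};

class StackGuardLike {
 public:
  void Request(InterruptFlag flag) { interrupt_flags_ |= flag; }
  void Continue(InterruptFlag flag) { interrupt_flags_ &= ~flag; }
  // 3.6.4 style: return the masked value and let it convert to bool.
  bool IsRequested(InterruptFlag flag) const { return interrupt_flags_ & flag; }
 private:
  int interrupt_flags_ = 0;
};

int main() {
  StackGuardLike guard;
  guard.Request(DEBUGBREAK);
  assert(guard.IsRequested(DEBUGBREAK));
  assert(!guard.IsRequested(PREEMPT));
  guard.Continue(DEBUGBREAK);
  assert(!guard.IsRequested(DEBUGBREAK));
  return 0;
}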
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 48e8c42057..3740c27aa8 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -40,7 +40,12 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ bool compact = false;
+ // All allocation spaces other than NEW_SPACE have the same effect.
+ if (args.Length() >= 1 && args[0]->IsBoolean()) {
+ compact = args[0]->BooleanValue();
+ }
+ HEAP->CollectAllGarbage(compact);
return v8::Undefined();
}
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 143b342083..97289266e3 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -234,7 +234,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
Handle<String> Factory::NewExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource) {
+ ExternalAsciiString::Resource* resource) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateExternalStringFromAscii(resource),
@@ -243,7 +243,7 @@ Handle<String> Factory::NewExternalStringFromAscii(
Handle<String> Factory::NewExternalStringFromTwoByte(
- const ExternalTwoByteString::Resource* resource) {
+ ExternalTwoByteString::Resource* resource) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
@@ -404,12 +404,10 @@ Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
}
-Handle<Map> Factory::NewMap(InstanceType type,
- int instance_size,
- ElementsKind elements_kind) {
+Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateMap(type, instance_size, elements_kind),
+ isolate()->heap()->AllocateMap(type, instance_size),
Map);
}
@@ -457,11 +455,23 @@ Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
}
+Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
+}
+
+
+Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
+}
+
+
Handle<Map> Factory::GetElementsTransitionMap(
- Handle<JSObject> src,
- ElementsKind elements_kind) {
+ Handle<Map> src,
+ ElementsKind elements_kind,
+ bool safe_to_add_transition) {
CALL_HEAP_FUNCTION(isolate(),
- src->GetElementsTransitionMap(elements_kind),
+ src->GetElementsTransitionMap(elements_kind,
+ safe_to_add_transition),
Map);
}
@@ -631,16 +641,14 @@ Handle<Object> Factory::NewError(const char* maker,
return undefined_value();
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
Handle<Object> type_obj = LookupAsciiSymbol(type);
- Handle<Object> argv[] = { type_obj, args };
+ Object** argv[2] = { type_obj.location(),
+ Handle<Object>::cast(args).location() };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
- isolate()->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
+ isolate()->js_builtins_object(), 2, argv, &caught_exception);
return result;
}
@@ -656,16 +664,13 @@ Handle<Object> Factory::NewError(const char* constructor,
Handle<JSFunction> fun = Handle<JSFunction>(
JSFunction::cast(isolate()->js_builtins_object()->
GetPropertyNoExceptionThrown(*constr)));
- Handle<Object> argv[] = { message };
+ Object** argv[1] = { Handle<Object>::cast(message).location() };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
- isolate()->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
+ isolate()->js_builtins_object(), 1, argv, &caught_exception);
return result;
}
@@ -717,12 +722,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
if (force_initial_map ||
type != JS_OBJECT_TYPE ||
instance_size != JSObject::kHeaderSize) {
- ElementsKind default_elements_kind = FLAG_smi_only_arrays
- ? FAST_SMI_ONLY_ELEMENTS
- : FAST_ELEMENTS;
- Handle<Map> initial_map = NewMap(type,
- instance_size,
- default_elements_kind);
+ Handle<Map> initial_map = NewMap(type, instance_size);
function->set_initial_map(*initial_map);
initial_map->set_constructor(*function);
}
@@ -908,26 +908,11 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
Handle<JSArray> result =
Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
pretenure));
- SetContent(result, elements);
+ result->SetContent(*elements);
return result;
}
-void Factory::SetContent(Handle<JSArray> array,
- Handle<FixedArray> elements) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->SetContent(*elements));
-}
-
-
-void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->EnsureCanContainNonSmiElements());
-}
-
-
Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
Handle<Object> prototype) {
CALL_HEAP_FUNCTION(
@@ -953,13 +938,6 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
}
-void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- object->SetIdentityHash(hash, ALLOW_CREATION));
-}
-
-
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
@@ -1012,12 +990,6 @@ Handle<String> Factory::NumberToString(Handle<Object> number) {
}
-Handle<String> Factory::Uint32ToString(uint32_t value) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->Uint32ToString(value), String);
-}
-
-
Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
Handle<NumberDictionary> dictionary,
uint32_t key,
@@ -1327,20 +1299,4 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
}
-Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
- Heap* h = isolate()->heap();
- if (name->Equals(h->undefined_symbol())) return undefined_value();
- if (name->Equals(h->nan_symbol())) return nan_value();
- if (name->Equals(h->infinity_symbol())) return infinity_value();
- return Handle<Object>::null();
-}
-
-
-Handle<Object> Factory::ToBoolean(bool value) {
- return Handle<Object>(value
- ? isolate()->heap()->true_value()
- : isolate()->heap()->false_value());
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index a3615f2a0a..71ae750b38 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -145,9 +145,9 @@ class Factory {
// not make sense to have a UTF-8 factory function for external strings,
// because we cannot change the underlying buffer.
Handle<String> NewExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource);
+ ExternalAsciiString::Resource* resource);
Handle<String> NewExternalStringFromTwoByte(
- const ExternalTwoByteString::Resource* resource);
+ ExternalTwoByteString::Resource* resource);
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewGlobalContext();
@@ -203,9 +203,7 @@ class Factory {
Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
- Handle<Map> NewMap(InstanceType type,
- int instance_size,
- ElementsKind elements_kind = FAST_ELEMENTS);
+ Handle<Map> NewMap(InstanceType type, int instance_size);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -217,8 +215,13 @@ class Factory {
Handle<Map> CopyMapDropTransitions(Handle<Map> map);
- Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
- ElementsKind elements_kind);
+ Handle<Map> GetFastElementsMap(Handle<Map> map);
+
+ Handle<Map> GetSlowElementsMap(Handle<Map> map);
+
+ Handle<Map> GetElementsTransitionMap(Handle<Map> map,
+ ElementsKind elements_kind,
+ bool safe_to_add_transition);
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
@@ -255,18 +258,12 @@ class Factory {
Handle<FixedArray> elements,
PretenureFlag pretenure = NOT_TENURED);
- void SetContent(Handle<JSArray> array, Handle<FixedArray> elements);
-
- void EnsureCanContainNonSmiElements(Handle<JSArray> array);
-
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
// Change the type of the argument into a JS object/function and reinitialize.
void BecomeJSObject(Handle<JSReceiver> object);
void BecomeJSFunction(Handle<JSReceiver> object);
- void SetIdentityHash(Handle<JSObject> object, Object* hash);
-
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
@@ -359,7 +356,6 @@ class Factory {
PropertyAttributes attributes);
Handle<String> NumberToString(Handle<Object> number);
- Handle<String> Uint32ToString(uint32_t value);
enum ApiInstanceType {
JavaScriptObject,
@@ -446,14 +442,6 @@ class Factory {
JSRegExp::Flags flags,
int capture_count);
- // Returns the value for a known global constant (a property of the global
- // object which is neither configurable nor writable) like 'undefined'.
- // Returns a null handle when the given name is unknown.
- Handle<Object> GlobalConstantFor(Handle<String> name);
-
- // Converts the given boolean condition to JavaScript boolean value.
- Handle<Object> ToBoolean(bool value);
-
private:
Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 58fab14e1c..7df2b0bf00 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -98,19 +98,13 @@ private:
// Flags for experimental language features.
DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
-DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_weakmaps, false, "enable harmony weak maps")
-DEFINE_bool(harmony, false, "enable all harmony features")
+DEFINE_bool(harmony_block_scoping, false, "enable harmony block scoping")
// Flags for experimental implementation features.
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
-DEFINE_bool(smi_only_arrays, false, "tracks arrays with only smi values")
-DEFINE_bool(string_slices, true, "use string slices")
-
-DEFINE_bool(clever_optimizations,
- true,
- "Optimize object size, Array shift, DOM strings and string +")
+DEFINE_bool(string_slices, false, "use string slices")
// Flags for Crankshaft.
#ifdef V8_TARGET_ARCH_MIPS
@@ -259,16 +253,10 @@ DEFINE_bool(print_cumulative_gc_stat, false,
"print cumulative GC statistics in name=value format on exit")
DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
-DEFINE_bool(trace_fragmentation, false,
- "report fragmentation for old pointer and data pages")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again before full gc")
-DEFINE_bool(incremental_marking, true, "use incremental marking")
-DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
-DEFINE_bool(trace_incremental_marking, false,
- "trace progress of the incremental marking")
// v8.cc
DEFINE_bool(use_idle_notification, true,
@@ -288,13 +276,8 @@ DEFINE_bool(native_code_counters, false,
// mark-compact.cc
DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
-DEFINE_bool(lazy_sweeping, true,
- "Use lazy sweeping for old pointer and data spaces")
-DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
- "Flush code caches in maps during mark compact cycle.")
DEFINE_bool(never_compact, false,
"Never perform compaction on full GC - testing only")
-DEFINE_bool(compact_code_space, false, "Compact code space")
DEFINE_bool(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
"flush code caches in maps during mark compact cycle.")
@@ -305,6 +288,9 @@ DEFINE_int(random_seed, 0,
DEFINE_bool(canonicalize_object_literal_maps, true,
"Canonicalize maps for object literals.")
+DEFINE_bool(use_big_map_space, true,
+ "Use big map space, but don't compact if it grew too big.")
+
DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
"Maximum number of pages in map space which still allows to encode "
"forwarding pointers. That's actually a constant, but it's useful "
@@ -340,6 +326,7 @@ DEFINE_bool(preemption, false,
// Regexp
DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
+DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
@@ -361,15 +348,11 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
DEFINE_bool(help, false, "Print usage message, including flags, on console")
DEFINE_bool(dump_counters, false, "Dump counters on exit")
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
DEFINE_bool(debugger, false, "Enable JavaScript debugger")
DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
"debugger agent in another process")
DEFINE_bool(debugger_agent, false, "Enable debugger agent")
DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
-#endif // ENABLE_DEBUGGER_SUPPORT
-
DEFINE_string(map_counters, "", "Map counters to a file")
DEFINE_args(js_arguments, JSArguments(),
"Pass all remaining arguments to the script. Alias for \"--\".")
@@ -395,15 +378,6 @@ DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
DEFINE_string(gdbjit_dump_filter, "",
"dump only objects containing this substring")
-// mark-compact.cc
-DEFINE_bool(force_marking_deque_overflows, false,
- "force overflows of marking deque by reducing it's size "
- "to 64 words")
-
-DEFINE_bool(stress_compaction, false,
- "stress the GC compactor to flush out bugs (implies "
- "--force_marking_deque_overflows)")
-
//
// Debug only flags
//
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 94c745cfc2..7ba79bf1b5 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -77,21 +77,6 @@ inline StackHandler* StackHandler::FromAddress(Address address) {
}
-inline bool StackHandler::is_entry() const {
- return state() == ENTRY;
-}
-
-
-inline bool StackHandler::is_try_catch() const {
- return state() == TRY_CATCH;
-}
-
-
-inline bool StackHandler::is_try_finally() const {
- return state() == TRY_FINALLY;
-}
-
-
inline StackHandler::State StackHandler::state() const {
const int offset = StackHandlerConstants::kStateOffset;
return static_cast<State>(Memory::int_at(address() + offset));
@@ -120,33 +105,8 @@ inline StackHandler* StackFrame::top_handler() const {
}
-inline Code* StackFrame::LookupCode() const {
- return GetContainingCode(isolate(), pc());
-}
-
-
inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
- return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
-}
-
-
-inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
- : StackFrame(iterator) {
-}
-
-
-inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
- : EntryFrame(iterator) {
-}
-
-
-inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
- : StackFrame(iterator) {
-}
-
-
-inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
- : StackFrame(iterator) {
+ return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
}
@@ -195,11 +155,6 @@ inline bool StandardFrame::IsConstructFrame(Address fp) {
}
-inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator) {
-}
-
-
Address JavaScriptFrame::GetParameterSlot(int index) const {
int param_count = ComputeParametersCount();
ASSERT(-1 <= index && index < param_count);
@@ -235,26 +190,6 @@ inline Object* JavaScriptFrame::function() const {
}
-inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
- : JavaScriptFrame(iterator) {
-}
-
-
-inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
- StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
-}
-
-
-inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator) {
-}
-
-
-inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
- : InternalFrame(iterator) {
-}
-
-
template<typename Iterator>
inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
Isolate* isolate)
@@ -262,15 +197,6 @@ inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
if (!done()) Advance();
}
-
-template<typename Iterator>
-inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
- Isolate* isolate, ThreadLocalTop* top)
- : iterator_(isolate, top) {
- if (!done()) Advance();
-}
-
-
template<typename Iterator>
inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
// TODO(1233797): The frame hierarchy needs to change. It's
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 412a59cc7d..bebd10a806 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -366,17 +366,16 @@ void SafeStackTraceFrameIterator::Advance() {
Code* StackFrame::GetSafepointData(Isolate* isolate,
- Address inner_pointer,
+ Address pc,
SafepointEntry* safepoint_entry,
unsigned* stack_slots) {
- InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
- isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
+ PcToCodeCache::PcToCodeCacheEntry* entry =
+ isolate->pc_to_code_cache()->GetCacheEntry(pc);
if (!entry->safepoint_entry.is_valid()) {
- entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
+ entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
ASSERT(entry->safepoint_entry.is_valid());
} else {
- ASSERT(entry->safepoint_entry.Equals(
- entry->code->GetSafepointEntry(inner_pointer)));
+ ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
}
// Fill in the results and return the code.
@@ -393,16 +392,11 @@ bool StackFrame::HasHandler() const {
}
-#ifdef DEBUG
-static bool GcSafeCodeContains(HeapObject* object, Address addr);
-#endif
-
-
void StackFrame::IteratePc(ObjectVisitor* v,
Address* pc_address,
Code* holder) {
Address pc = *pc_address;
- ASSERT(GcSafeCodeContains(holder, pc));
+ ASSERT(holder->contains(pc));
unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
Object* code = holder;
v->VisitPointer(&code);
@@ -825,8 +819,7 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
// back to a slow search in this case to find the original optimized
// code object.
if (!code->contains(pc())) {
- code = isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(pc());
+ code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
}
ASSERT(code != NULL);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
@@ -888,11 +881,6 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
}
-int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
- return Smi::cast(GetExpression(0))->value();
-}
-
-
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
return fp() + StandardFrameConstants::kCallerSPOffset;
}
@@ -1167,89 +1155,52 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
// -------------------------------------------------------------------------
-static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
- MapWord map_word = object->map_word();
- return map_word.IsForwardingAddress() ?
- map_word.ToForwardingAddress()->map() : map_word.ToMap();
-}
-
-
-static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
- return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
-}
-
-
-#ifdef DEBUG
-static bool GcSafeCodeContains(HeapObject* code, Address addr) {
- Map* map = GcSafeMapOfCodeSpaceObject(code);
- ASSERT(map == code->GetHeap()->code_map());
- Address start = code->address();
- Address end = code->address() + code->SizeFromMap(map);
- return start <= addr && addr < end;
-}
-#endif
-
-
-Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
- Address inner_pointer) {
+Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
Code* code = reinterpret_cast<Code*>(object);
- ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer));
+ ASSERT(code != NULL && code->contains(pc));
return code;
}
-Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
- Address inner_pointer) {
+Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
Heap* heap = isolate_->heap();
- // Check if the inner pointer points into a large object chunk.
- LargePage* large_page = heap->lo_space()->FindPageContainingPc(inner_pointer);
- if (large_page != NULL) {
- return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
- }
-
- // Iterate through the page until we reach the end or find an object starting
- // after the inner pointer.
- Page* page = Page::FromAddress(inner_pointer);
-
- Address addr = page->skip_list()->StartFor(inner_pointer);
-
- Address top = heap->code_space()->top();
- Address limit = heap->code_space()->limit();
-
+ // Check if the pc points into a large object chunk.
+ LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
+ if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
+
+ // Iterate through the 8K page until we reach the end or find an
+ // object starting after the pc.
+ Page* page = Page::FromAddress(pc);
+ HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
+ HeapObject* previous = NULL;
while (true) {
- if (addr == top && addr != limit) {
- addr = limit;
- continue;
+ HeapObject* next = iterator.next();
+ if (next == NULL || next->address() >= pc) {
+ return GcSafeCastToCode(previous, pc);
}
-
- HeapObject* obj = HeapObject::FromAddress(addr);
- int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
- Address next_addr = addr + obj_size;
- if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer);
- addr = next_addr;
+ previous = next;
}
}
-InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
- InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
+PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
isolate_->counters()->pc_to_code()->Increment();
- ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
+ ASSERT(IsPowerOf2(kPcToCodeCacheSize));
uint32_t hash = ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)));
- uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
- InnerPointerToCodeCacheEntry* entry = cache(index);
- if (entry->inner_pointer == inner_pointer) {
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
+ uint32_t index = hash & (kPcToCodeCacheSize - 1);
+ PcToCodeCacheEntry* entry = cache(index);
+ if (entry->pc == pc) {
isolate_->counters()->pc_to_code_cached()->Increment();
- ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
+ ASSERT(entry->code == GcSafeFindCodeForPc(pc));
} else {
// Because this code may be interrupted by a profiling signal that
- // also queries the cache, we cannot update inner_pointer before the code
- // has been set. Otherwise, we risk trying to use a cache entry before
+ // also queries the cache, we cannot update pc before the code has
+ // been set. Otherwise, we risk trying to use a cache entry before
// the code has been computed.
- entry->code = GcSafeFindCodeForInnerPointer(inner_pointer);
+ entry->code = GcSafeFindCodeForPc(pc);
entry->safepoint_entry.Reset();
- entry->inner_pointer = inner_pointer;
+ entry->pc = pc;
}
return entry;
}
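
The GetCacheEntry hunk above goes back to a plain pc-keyed, power-of-two cache. A minimal standalone model of that lookup, with a placeholder hash and a stub FindCode standing in for ComputeIntegerHash and GcSafeFindCodeForPc (the real entry also carries a safepoint entry, omitted here):

#include <cstdint>

struct Code;                                           // opaque stand-in
static Code* FindCode(uintptr_t) { return nullptr; }   // stub for GcSafeFindCodeForPc

static const int kCacheSize = 1024;                    // must stay a power of two
struct Entry { uintptr_t pc; Code* code; };
static Entry cache[kCacheSize];

static Entry* GetCacheEntrySketch(uintptr_t pc) {
  uint32_t hash = static_cast<uint32_t>(pc) * 2654435761u;  // placeholder hash
  Entry* entry = &cache[hash & (kCacheSize - 1)];
  if (entry->pc != pc) {
    // As in the hunk: fill in the code pointer before the key, so a profiling
    // signal reading the entry concurrently never sees a key without code.
    entry->code = FindCode(pc);
    entry->pc = pc;
  }
  return entry;
}
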
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index ca19b053aa..fed11c4faf 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -49,36 +49,36 @@ class StackFrameIterator;
class ThreadLocalTop;
class Isolate;
-class InnerPointerToCodeCache {
+class PcToCodeCache {
public:
- struct InnerPointerToCodeCacheEntry {
- Address inner_pointer;
+ struct PcToCodeCacheEntry {
+ Address pc;
Code* code;
SafepointEntry safepoint_entry;
};
- explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
+ explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
Flush();
}
- Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
- Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer);
+ Code* GcSafeFindCodeForPc(Address pc);
+ Code* GcSafeCastToCode(HeapObject* object, Address pc);
void Flush() {
memset(&cache_[0], 0, sizeof(cache_));
}
- InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
+ PcToCodeCacheEntry* GetCacheEntry(Address pc);
private:
- InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
+ PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
Isolate* isolate_;
- static const int kInnerPointerToCodeCacheSize = 1024;
- InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
+ static const int kPcToCodeCacheSize = 1024;
+ PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
- DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
+ DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
};
@@ -106,9 +106,9 @@ class StackHandler BASE_EMBEDDED {
static inline StackHandler* FromAddress(Address address);
// Testers
- inline bool is_entry() const;
- inline bool is_try_catch() const;
- inline bool is_try_finally() const;
+ bool is_entry() { return state() == ENTRY; }
+ bool is_try_catch() { return state() == TRY_CATCH; }
+ bool is_try_finally() { return state() == TRY_FINALLY; }
private:
// Accessors.
@@ -139,10 +139,7 @@ class StackFrame BASE_EMBEDDED {
enum Type {
NONE = 0,
STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
- NUMBER_OF_TYPES,
- // Used by FrameScope to indicate that the stack frame is constructed
- // manually and the FrameScope does not need to emit code.
- MANUAL
+ NUMBER_OF_TYPES
};
#undef DECLARE_TYPE
@@ -218,7 +215,9 @@ class StackFrame BASE_EMBEDDED {
virtual Code* unchecked_code() const = 0;
// Get the code associated with this frame.
- inline Code* LookupCode() const;
+ Code* LookupCode() const {
+ return GetContainingCode(isolate(), pc());
+ }
// Get the code object that contains the given pc.
static inline Code* GetContainingCode(Isolate* isolate, Address pc);
@@ -300,7 +299,7 @@ class EntryFrame: public StackFrame {
virtual void SetCallerFp(Address caller_fp);
protected:
- inline explicit EntryFrame(StackFrameIterator* iterator);
+ explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
// The caller stack pointer for entry frames is always zero. The
// real information about the caller frame is available through the
@@ -327,7 +326,8 @@ class EntryConstructFrame: public EntryFrame {
}
protected:
- inline explicit EntryConstructFrame(StackFrameIterator* iterator);
+ explicit EntryConstructFrame(StackFrameIterator* iterator)
+ : EntryFrame(iterator) { }
private:
friend class StackFrameIterator;
@@ -361,7 +361,7 @@ class ExitFrame: public StackFrame {
static void FillState(Address fp, Address sp, State* state);
protected:
- inline explicit ExitFrame(StackFrameIterator* iterator);
+ explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
virtual Address GetCallerStackPointer() const;
@@ -394,7 +394,8 @@ class StandardFrame: public StackFrame {
}
protected:
- inline explicit StandardFrame(StackFrameIterator* iterator);
+ explicit StandardFrame(StackFrameIterator* iterator)
+ : StackFrame(iterator) { }
virtual void ComputeCallerState(State* state) const;
@@ -513,7 +514,8 @@ class JavaScriptFrame: public StandardFrame {
}
protected:
- inline explicit JavaScriptFrame(StackFrameIterator* iterator);
+ explicit JavaScriptFrame(StackFrameIterator* iterator)
+ : StandardFrame(iterator) { }
virtual Address GetCallerStackPointer() const;
@@ -550,7 +552,8 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
protected:
- inline explicit OptimizedFrame(StackFrameIterator* iterator);
+ explicit OptimizedFrame(StackFrameIterator* iterator)
+ : JavaScriptFrame(iterator) { }
private:
friend class StackFrameIterator;
@@ -578,9 +581,12 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
int index) const;
protected:
- inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
+ explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
+ : JavaScriptFrame(iterator) { }
- virtual int GetNumberOfIncomingArguments() const;
+ virtual int GetNumberOfIncomingArguments() const {
+ return Smi::cast(GetExpression(0))->value();
+ }
virtual Address GetCallerStackPointer() const;
@@ -605,7 +611,8 @@ class InternalFrame: public StandardFrame {
}
protected:
- inline explicit InternalFrame(StackFrameIterator* iterator);
+ explicit InternalFrame(StackFrameIterator* iterator)
+ : StandardFrame(iterator) { }
virtual Address GetCallerStackPointer() const;
@@ -626,7 +633,8 @@ class ConstructFrame: public InternalFrame {
}
protected:
- inline explicit ConstructFrame(StackFrameIterator* iterator);
+ explicit ConstructFrame(StackFrameIterator* iterator)
+ : InternalFrame(iterator) { }
private:
friend class StackFrameIterator;
@@ -702,26 +710,20 @@ class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
- inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top);
-
// Skip frames until the frame with the given id is reached.
explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
- JavaScriptFrameIteratorTemp(Address fp,
- Address sp,
- Address low_bound,
- Address high_bound) :
+ JavaScriptFrameIteratorTemp(Address fp, Address sp,
+ Address low_bound, Address high_bound) :
iterator_(fp, sp, low_bound, high_bound) {
if (!done()) Advance();
}
JavaScriptFrameIteratorTemp(Isolate* isolate,
- Address fp,
- Address sp,
- Address low_bound,
- Address high_bound) :
+ Address fp, Address sp,
+ Address low_bound, Address high_bound) :
iterator_(isolate, fp, sp, low_bound, high_bound) {
if (!done()) Advance();
}
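
The constructor hunks above make the frame inheritance chain readable straight from the header; as a reference, this is the hierarchy they imply (skeleton only, all members and methods omitted):

// Skeleton of the frames.h hierarchy as shown by the constructors above.
class StackFrame { /* pc(), fp(), LookupCode(), ... */ };
class EntryFrame : public StackFrame {};
class EntryConstructFrame : public EntryFrame {};
class ExitFrame : public StackFrame {};
class StandardFrame : public StackFrame {};
class JavaScriptFrame : public StandardFrame {};
class OptimizedFrame : public JavaScriptFrame {};
class ArgumentsAdaptorFrame : public JavaScriptFrame {};
class InternalFrame : public StandardFrame {};
class ConstructFrame : public InternalFrame {};
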
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 083675d133..8073874132 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -244,6 +244,11 @@ void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
}
+void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
+ Visit(expr->expression());
+}
+
+
void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
Visit(expr->left());
Visit(expr->right());
@@ -286,10 +291,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_optimizable(info->IsOptimizable());
cgen.PopulateDeoptimizationData(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
-#ifdef ENABLE_DEBUGGER_SUPPORT
code->set_has_debug_break_slots(
info->isolate()->debugger()->IsDebuggerActive());
-#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
@@ -520,7 +523,7 @@ void FullCodeGenerator::VisitDeclarations(
if (var->IsUnallocated()) {
array->set(j++, *(var->name()));
if (decl->fun() == NULL) {
- if (var->mode() == CONST) {
+ if (var->mode() == Variable::CONST) {
// In case this is const property use the hole.
array->set_the_hole(j++);
} else {
@@ -820,19 +823,9 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
if (stmt->block_scope() != NULL) {
{ Comment cmnt(masm_, "[ Extend block context");
scope_ = stmt->block_scope();
- Handle<SerializedScopeInfo> scope_info = scope_->GetSerializedScopeInfo();
- int heap_slots =
- scope_info->NumberOfContextSlots() - Context::MIN_CONTEXT_SLOTS;
- __ Push(scope_info);
+ __ Push(scope_->GetSerializedScopeInfo());
PushFunctionArgumentForContextAllocation();
- if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
- FastNewBlockContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kPushBlockContext, 2);
- }
-
- // Replace the context stored in the frame.
+ __ CallRuntime(Runtime::kPushBlockContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
@@ -1328,21 +1321,19 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit(
}
-bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
- Expression *sub_expr;
+bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ Expression *expr;
Handle<String> check;
- if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
- EmitLiteralCompareTypeof(sub_expr, check);
- return true;
- }
-
- if (expr->IsLiteralCompareUndefined(&sub_expr)) {
- EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+ if (compare->IsLiteralCompareTypeof(&expr, &check)) {
+ EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
return true;
}
- if (expr->IsLiteralCompareNull(&sub_expr)) {
- EmitLiteralCompareNil(expr, sub_expr, kNullValue);
+ if (compare->IsLiteralCompareUndefined(&expr)) {
+ EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
return true;
}
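
The rewritten TryLiteralCompare above goes back to threading the three branch labels through each helper. A simplified standalone model of that dispatch shape, with hypothetical Expr/Label stand-ins for the AST and assembler types:

struct Label;
struct Expr;

// Stand-ins for the predicates and emitters named in the hunk.
bool IsTypeofCompare(Expr* cmp, Expr** sub);
bool IsUndefinedCompare(Expr* cmp, Expr** sub);
void EmitTypeofCompare(Expr* sub, Label* t, Label* f, Label* fall);
void EmitUndefinedCompare(Expr* sub, Label* t, Label* f, Label* fall);

// Try each special-cased literal comparison in turn; return true once code
// has been emitted so the caller can skip the generic compare stub.
static bool TryLiteralCompareSketch(Expr* cmp, Label* t, Label* f, Label* fall) {
  Expr* sub = nullptr;
  if (IsTypeofCompare(cmp, &sub)) {
    EmitTypeofCompare(sub, t, f, fall);
    return true;
  }
  if (IsUndefinedCompare(cmp, &sub)) {
    EmitUndefinedCompare(sub, t, f, fall);
    return true;
  }
  return false;  // fall through to the generic comparison
}
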
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 081192a541..803c618732 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -391,16 +391,25 @@ class FullCodeGenerator: public AstVisitor {
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise.
- bool TryLiteralCompare(CompareOperation* compare);
+ bool TryLiteralCompare(CompareOperation* compare,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
// Platform-specific code for comparing the type of a value with
// a given literal string.
- void EmitLiteralCompareTypeof(Expression* expr, Handle<String> check);
-
- // Platform-specific code for equality comparison with a nil-like value.
- void EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil);
+ void EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
+ // Platform-specific code for strict equality comparison with
+ // the undefined value.
+ void EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
// Bailout support.
void PrepareForBailout(Expression* node, State state);
@@ -423,7 +432,7 @@ class FullCodeGenerator: public AstVisitor {
// Platform-specific code for a variable, constant, or function
// declaration. Functions have an initial value.
void EmitDeclaration(VariableProxy* proxy,
- VariableMode mode,
+ Variable::Mode mode,
FunctionLiteral* function,
int* global_count);
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index d0c78d6e22..6c6966aee5 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -255,10 +255,6 @@ const int kBinary32MinExponent = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
-// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
-// other bits set.
-const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
-
// ASCII/UC16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 57f5d1b66f..35c363c10c 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -190,11 +190,7 @@ static int ExpectedNofPropertiesFromEstimate(int estimate) {
// Inobject slack tracking will reclaim redundant inobject space later,
// so we can afford to adjust the estimate generously.
- if (FLAG_clever_optimizations) {
- return estimate + 8;
- } else {
- return estimate + 3;
- }
+ return estimate + 8;
}
@@ -425,18 +421,17 @@ Handle<Object> PreventExtensions(Handle<JSObject> object) {
}
-Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
- Handle<String> key,
- Handle<Object> value) {
+Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
+ JSObject::HiddenPropertiesFlag flag) {
CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->SetHiddenProperty(*key, *value),
+ obj->GetHiddenProperties(flag),
Object);
}
-int GetIdentityHash(Handle<JSReceiver> obj) {
+int GetIdentityHash(Handle<JSObject> obj) {
CALL_AND_RETRY(obj->GetIsolate(),
- obj->GetIdentityHash(ALLOW_CREATION),
+ obj->GetIdentityHash(JSObject::ALLOW_CREATION),
return Smi::cast(__object__)->value(),
return 0);
}
@@ -891,7 +886,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
- Handle<JSReceiver> key,
+ Handle<JSObject> key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(table->GetIsolate(),
table->Put(*key, *value),
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index d5521f89c1..7eaf4de927 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -263,13 +263,14 @@ Handle<Object> GetPrototype(Handle<Object> obj);
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
-// Sets a hidden property on an object. Returns obj on success, undefined
-// if trying to set the property on a detached proxy.
-Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
- Handle<String> key,
- Handle<Object> value);
+// Return the object's hidden properties object. If the object has no hidden
+// properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new
+// hidden property object will be allocated. Otherwise Heap::undefined_value
+// is returned.
+Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
+ JSObject::HiddenPropertiesFlag flag);
-int GetIdentityHash(Handle<JSReceiver> obj);
+int GetIdentityHash(Handle<JSObject> obj);
Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
@@ -347,7 +348,7 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> PreventExtensions(Handle<JSObject> object);
Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
- Handle<JSReceiver> key,
+ Handle<JSObject> key,
Handle<Object> value);
// Does lazy compilation of the given function. Returns true on success and
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 4bd893e8ee..7b666af5b0 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -33,26 +33,15 @@
#include "list-inl.h"
#include "objects.h"
#include "v8-counters.h"
-#include "store-buffer.h"
-#include "store-buffer-inl.h"
namespace v8 {
namespace internal {
void PromotionQueue::insert(HeapObject* target, int size) {
- if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
- NewSpacePage* rear_page =
- NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
- ASSERT(!rear_page->prev_page()->is_anchor());
- rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
- }
*(--rear_) = reinterpret_cast<intptr_t>(target);
*(--rear_) = size;
// Assert no overflow into live objects.
-#ifdef DEBUG
- SemiSpace::AssertValidRange(HEAP->new_space()->top(),
- reinterpret_cast<Address>(rear_));
-#endif
+ ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
}
@@ -95,7 +84,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
+ ? lo_space_->AllocateRaw(size)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -128,7 +117,7 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
+ ? lo_space_->AllocateRaw(size)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -192,7 +181,7 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
} else if (CODE_SPACE == space) {
result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
- result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+ result = lo_space_->AllocateRaw(size_in_bytes);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
} else {
@@ -276,11 +265,6 @@ bool Heap::InNewSpace(Object* object) {
}
-bool Heap::InNewSpace(Address addr) {
- return new_space_.Contains(addr);
-}
-
-
bool Heap::InFromSpace(Object* object) {
return new_space_.FromSpaceContains(object);
}
@@ -291,36 +275,29 @@ bool Heap::InToSpace(Object* object) {
}
-bool Heap::OldGenerationAllocationLimitReached() {
- if (!incremental_marking()->IsStopped()) return false;
- return OldGenerationSpaceAvailable() < 0;
-}
-
-
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
// An object should be promoted if:
// - the object has survived a scavenge operation or
// - to space is already 25% full.
- NewSpacePage* page = NewSpacePage::FromAddress(old_address);
- Address age_mark = new_space_.age_mark();
- bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
- (!page->ContainsLimit(age_mark) || old_address < age_mark);
- return below_mark || (new_space_.Size() + object_size) >=
- (new_space_.EffectiveCapacity() >> 2);
+ return old_address < new_space_.age_mark()
+ || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
}
void Heap::RecordWrite(Address address, int offset) {
- if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
+ if (new_space_.Contains(address)) return;
+ ASSERT(!new_space_.FromSpaceContains(address));
+ SLOW_ASSERT(Contains(address + offset));
+ Page::FromAddress(address)->MarkRegionDirty(address + offset);
}
void Heap::RecordWrites(Address address, int start, int len) {
- if (!InNewSpace(address)) {
- for (int i = 0; i < len; i++) {
- store_buffer_.Mark(address + start + i * kPointerSize);
- }
- }
+ if (new_space_.Contains(address)) return;
+ ASSERT(!new_space_.FromSpaceContains(address));
+ Page* page = Page::FromAddress(address);
+ page->SetRegionMarks(page->GetRegionMarks() |
+ page->GetRegionMaskForSpan(address + start, len * kPointerSize));
}
@@ -366,6 +343,31 @@ void Heap::CopyBlock(Address dst, Address src, int byte_size) {
}
+void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size) {
+ ASSERT(IsAligned(byte_size, kPointerSize));
+
+ Page* page = Page::FromAddress(dst);
+ uint32_t marks = page->GetRegionMarks();
+
+ for (int remaining = byte_size / kPointerSize;
+ remaining > 0;
+ remaining--) {
+ Memory::Object_at(dst) = Memory::Object_at(src);
+
+ if (InNewSpace(Memory::Object_at(dst))) {
+ marks |= page->GetRegionMaskForAddress(dst);
+ }
+
+ dst += kPointerSize;
+ src += kPointerSize;
+ }
+
+ page->SetRegionMarks(marks);
+}
+
+
void Heap::MoveBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
@@ -385,6 +387,16 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
}
+void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size) {
+ ASSERT(IsAligned(byte_size, kPointerSize));
+ ASSERT((dst < src) || (dst >= (src + byte_size)));
+
+ CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
+}
+
+
void Heap::ScavengePointer(HeapObject** p) {
ScavengeObject(p, *p);
}
@@ -402,9 +414,7 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
- HeapObject* dest = first_word.ToForwardingAddress();
- ASSERT(HEAP->InFromSpace(*p));
- *p = dest;
+ *p = first_word.ToForwardingAddress();
return;
}
@@ -449,7 +459,7 @@ int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
amount_of_external_allocated_memory_ -
amount_of_external_allocated_memory_at_last_global_gc_;
if (amount_since_last_global_gc > external_allocation_limit_) {
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(false);
}
} else {
// Avoid underflow.
@@ -466,7 +476,6 @@ void Heap::SetLastScriptId(Object* last_script_id) {
roots_[kLastScriptIdRootIndex] = last_script_id;
}
-
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
@@ -679,6 +688,15 @@ Heap* _inline_get_heap_() {
}
+void MarkCompactCollector::SetMark(HeapObject* obj) {
+ tracer_->increment_marked_count();
+#ifdef DEBUG
+ UpdateLiveObjectCount(obj);
+#endif
+ obj->SetMark();
+}
+
+
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_
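
The heap-inl.h hunks above drop the store-buffer machinery and return to the age-mark / quarter-capacity promotion rule. A standalone model of that predicate, with plain integers standing in for addresses and space sizes:

#include <cstdint>

// Promote when the object lies below the age mark (it already survived one
// scavenge) or when to-space is at least a quarter full, matching the
// downgraded Heap::ShouldBePromoted above.
static bool ShouldBePromotedSketch(uintptr_t old_address,
                                   int object_size,
                                   uintptr_t age_mark,
                                   intptr_t new_space_size,
                                   intptr_t new_space_capacity) {
  return old_address < age_mark ||
         (new_space_size + object_size) >= (new_space_capacity >> 2);
}
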
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 46c63c27c8..7e613e9173 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -114,6 +114,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
+ HEAP->CollectAllGarbage(true);
HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot();
break;
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index c6efd62050..d0185930b7 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -36,16 +36,13 @@
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
-#include "incremental-marking.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "snapshot.h"
-#include "store-buffer.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
@@ -61,6 +58,10 @@ namespace v8 {
namespace internal {
+static const intptr_t kMinimumPromotionLimit = 2 * MB;
+static const intptr_t kMinimumAllocationLimit = 8 * MB;
+
+
static Mutex* gc_initializer_mutex = OS::CreateMutex();
@@ -69,21 +70,27 @@ Heap::Heap()
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
-#define LUMP_OF_MEMORY (128 * KB)
+ reserved_semispace_size_(2*MB),
+ max_semispace_size_(2*MB),
+ initial_semispace_size_(128*KB),
+ max_old_generation_size_(192*MB),
+ max_executable_size_(max_old_generation_size_),
code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
-#define LUMP_OF_MEMORY (2 * MB)
+ reserved_semispace_size_(16*MB),
+ max_semispace_size_(16*MB),
+ initial_semispace_size_(1*MB),
+ max_old_generation_size_(1400*MB),
+ max_executable_size_(256*MB),
code_range_size_(512*MB),
#else
-#define LUMP_OF_MEMORY MB
+ reserved_semispace_size_(8*MB),
+ max_semispace_size_(8*MB),
+ initial_semispace_size_(512*KB),
+ max_old_generation_size_(700*MB),
+ max_executable_size_(128*MB),
code_range_size_(0),
#endif
- reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_old_generation_size_(700ul * LUMP_OF_MEMORY),
- max_executable_size_(128l * LUMP_OF_MEMORY),
-
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
@@ -93,7 +100,6 @@ Heap::Heap()
always_allocate_scope_depth_(0),
linear_allocation_scope_depth_(0),
contexts_disposed_(0),
- scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
old_data_space_(NULL),
@@ -103,6 +109,7 @@ Heap::Heap()
lo_space_(NULL),
gc_state_(NOT_IN_GC),
gc_post_processing_depth_(0),
+ mc_count_(0),
ms_count_(0),
gc_count_(0),
unflattened_strings_length_(0),
@@ -114,13 +121,10 @@ Heap::Heap()
#endif // DEBUG
old_gen_promotion_limit_(kMinimumPromotionLimit),
old_gen_allocation_limit_(kMinimumAllocationLimit),
- old_gen_limit_factor_(1),
- size_of_old_gen_at_last_old_space_gc_(0),
external_allocation_limit_(0),
amount_of_external_allocated_memory_(0),
amount_of_external_allocated_memory_at_last_global_gc_(0),
old_gen_exhausted_(false),
- store_buffer_rebuilder_(store_buffer()),
hidden_symbol_(NULL),
global_gc_prologue_callback_(NULL),
global_gc_epilogue_callback_(NULL),
@@ -137,14 +141,12 @@ Heap::Heap()
min_in_mutator_(kMaxInt),
alive_after_last_gc_(0),
last_gc_end_timestamp_(0.0),
- store_buffer_(this),
- marking_(this),
- incremental_marking_(this),
+ page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
configured_(false),
- chunks_queued_for_free_(NULL) {
+ is_safe_to_read_maps_(true) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -222,10 +224,29 @@ bool Heap::HasBeenSetup() {
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
- if (IntrusiveMarking::IsMarked(object)) {
- return IntrusiveMarking::SizeOfMarkedObject(object);
+ ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
+ ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
+ MapWord map_word = object->map_word();
+ map_word.ClearMark();
+ map_word.ClearOverflow();
+ return object->SizeFromMap(map_word.ToMap());
+}
+
+
+int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
+ ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
+ ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
+ uint32_t marker = Memory::uint32_at(object->address());
+ if (marker == MarkCompactCollector::kSingleFreeEncoding) {
+ return kIntSize;
+ } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
+ return Memory::int_at(object->address() + kIntSize);
+ } else {
+ MapWord map_word = object->map_word();
+ Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
+ Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
+ return object->SizeFromMap(map);
}
- return object->SizeFromMap(object->map());
}
@@ -379,7 +400,6 @@ void Heap::GarbageCollectionPrologue() {
#endif // DEBUG
LiveObjectList::GCPrologue();
- store_buffer()->GCPrologue();
}
intptr_t Heap::SizeOfObjects() {
@@ -392,7 +412,6 @@ intptr_t Heap::SizeOfObjects() {
}
void Heap::GarbageCollectionEpilogue() {
- store_buffer()->GCEpilogue();
LiveObjectList::GCEpilogue();
#ifdef DEBUG
allow_allocation(true);
@@ -424,13 +443,13 @@ void Heap::GarbageCollectionEpilogue() {
}
-void Heap::CollectAllGarbage(int flags) {
+void Heap::CollectAllGarbage(bool force_compaction) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
- mark_compact_collector_.SetFlags(flags);
+ mark_compact_collector_.SetForceCompaction(force_compaction);
CollectGarbage(OLD_POINTER_SPACE);
- mark_compact_collector_.SetFlags(kNoGCFlags);
+ mark_compact_collector_.SetForceCompaction(false);
}
@@ -438,6 +457,8 @@ void Heap::CollectAllAvailableGarbage() {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
+ mark_compact_collector()->SetForceCompaction(true);
+
// Major GC would invoke weak handle callbacks on weakly reachable
// handles, but won't collect weakly reachable objects until next
// major GC. Therefore if we collect aggressively and weak handle callback
@@ -446,14 +467,13 @@ void Heap::CollectAllAvailableGarbage() {
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
- mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
const int kMaxNumberOfAttempts = 7;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
break;
}
}
- mark_compact_collector()->SetFlags(kNoGCFlags);
+ mark_compact_collector()->SetForceCompaction(false);
}
@@ -470,23 +490,6 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
- if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Scavenge during marking.\n");
- }
- }
-
- if (collector == MARK_COMPACTOR &&
- !mark_compact_collector()->PreciseSweepingRequired() &&
- !incremental_marking()->IsStopped() &&
- !incremental_marking()->should_hurry() &&
- FLAG_incremental_marking_steps) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
- }
- collector = SCAVENGER;
- }
-
bool next_gc_likely_to_collect_more = false;
{ GCTracer tracer(this);
@@ -509,24 +512,13 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
GarbageCollectionEpilogue();
}
- ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
- if (incremental_marking()->IsStopped()) {
- if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
- incremental_marking()->Start();
- }
- }
-
return next_gc_likely_to_collect_more;
}
void Heap::PerformScavenge() {
GCTracer tracer(this);
- if (incremental_marking()->IsStopped()) {
- PerformGarbageCollection(SCAVENGER, &tracer);
- } else {
- PerformGarbageCollection(MARK_COMPACTOR, &tracer);
- }
+ PerformGarbageCollection(SCAVENGER, &tracer);
}
@@ -618,6 +610,13 @@ void Heap::EnsureFromSpaceIsCommitted() {
// Committing memory to from space failed.
// Try shrinking and try again.
+ PagedSpaces spaces;
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
+ space->RelinkPageListInChunkOrder(true);
+ }
+
Shrink();
if (new_space_.CommitFromSpaceIfNeeded()) return;
@@ -648,10 +647,7 @@ void Heap::ClearJSFunctionResultCaches() {
void Heap::ClearNormalizedMapCaches() {
- if (isolate_->bootstrapper()->IsActive() &&
- !incremental_marking()->IsMarking()) {
- return;
- }
+ if (isolate_->bootstrapper()->IsActive()) return;
Object* context = global_contexts_list_;
while (!context->IsUndefined()) {
@@ -661,6 +657,24 @@ void Heap::ClearNormalizedMapCaches() {
}
+#ifdef DEBUG
+
+enum PageWatermarkValidity {
+ ALL_VALID,
+ ALL_INVALID
+};
+
+static void VerifyPageWatermarkValidity(PagedSpace* space,
+ PageWatermarkValidity validity) {
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+ bool expected_value = (validity == ALL_VALID);
+ while (it.has_next()) {
+ Page* page = it.next();
+ ASSERT(page->IsWatermarkValid() == expected_value);
+ }
+}
+#endif
+
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
double survival_rate =
(static_cast<double>(young_survivors_after_last_gc_) * 100) /
@@ -713,13 +727,6 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
int start_new_space_size = Heap::new_space()->SizeAsInt();
- if (IsHighSurvivalRate()) {
- // We speed up the incremental marker if it is running so that it
- // does not fall behind the rate of promotion, which would cause a
- // constantly growing old space.
- incremental_marking()->NotifyOfHighPromotionRate();
- }
-
if (collector == MARK_COMPACTOR) {
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
@@ -729,7 +736,11 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
- size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
+ intptr_t old_gen_size = PromotedSpaceSize();
+ old_gen_promotion_limit_ =
+ old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+ old_gen_allocation_limit_ =
+ old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
@@ -739,16 +750,10 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// In this case we aggressively raise old generation memory limits to
// postpone subsequent mark-sweep collection and thus trade memory
// space for the mutation speed.
- old_gen_limit_factor_ = 2;
- } else {
- old_gen_limit_factor_ = 1;
+ old_gen_promotion_limit_ *= 2;
+ old_gen_allocation_limit_ *= 2;
}
- old_gen_promotion_limit_ =
- OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
- old_gen_allocation_limit_ =
- OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-
old_gen_exhausted_ = false;
} else {
tracer_ = tracer;
@@ -777,7 +782,9 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
amount_of_external_allocated_memory_;
}
- GCCallbackFlags callback_flags = kNoGCCallbackFlags;
+ GCCallbackFlags callback_flags = tracer->is_compacting()
+ ? kGCCallbackFlagCompacted
+ : kNoGCCallbackFlags;
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
@@ -801,24 +808,34 @@ void Heap::MarkCompact(GCTracer* tracer) {
mark_compact_collector_.Prepare(tracer);
- ms_count_++;
- tracer->set_full_gc_count(ms_count_);
+ bool is_compacting = mark_compact_collector_.IsCompacting();
- MarkCompactPrologue();
+ if (is_compacting) {
+ mc_count_++;
+ } else {
+ ms_count_++;
+ }
+ tracer->set_full_gc_count(mc_count_ + ms_count_);
+ MarkCompactPrologue(is_compacting);
+
+ is_safe_to_read_maps_ = false;
mark_compact_collector_.CollectGarbage();
+ is_safe_to_read_maps_ = true;
LOG(isolate_, ResourceEvent("markcompact", "end"));
gc_state_ = NOT_IN_GC;
+ Shrink();
+
isolate_->counters()->objs_since_last_full()->Set(0);
contexts_disposed_ = 0;
}
-void Heap::MarkCompactPrologue() {
+void Heap::MarkCompactPrologue(bool is_compacting) {
// At any old GC clear the keyed lookup cache to enable collection of unused
// maps.
isolate_->keyed_lookup_cache()->Clear();
@@ -830,8 +847,7 @@ void Heap::MarkCompactPrologue() {
CompletelyClearInstanceofCache();
- // TODO(1605) select heuristic for flushing NumberString cache with
- // FlushNumberStringCache
+ if (is_compacting) FlushNumberStringCache();
if (FLAG_cleanup_code_caches_at_gc) {
polymorphic_code_cache()->set_cache(undefined_value());
}
@@ -841,8 +857,13 @@ void Heap::MarkCompactPrologue() {
Object* Heap::FindCodeObject(Address a) {
- return isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(a);
+ Object* obj = NULL; // Initialization to please compiler.
+ { MaybeObject* maybe_obj = code_space_->FindObject(a);
+ if (!maybe_obj->ToObject(&obj)) {
+ obj = lo_space_->FindObject(a)->ToObjectUnchecked();
+ }
+ }
+ return obj;
}
@@ -890,18 +911,14 @@ static void VerifyNonPointerSpacePointers() {
// do not expect them.
VerifyNonPointerSpacePointersVisitor v;
HeapObjectIterator code_it(HEAP->code_space());
- for (HeapObject* object = code_it.Next();
- object != NULL; object = code_it.Next())
+ for (HeapObject* object = code_it.next();
+ object != NULL; object = code_it.next())
object->Iterate(&v);
- // The old data space was normally swept conservatively so that the iterator
- // doesn't work, so we normally skip the next bit.
- if (!HEAP->old_data_space()->was_swept_conservatively()) {
- HeapObjectIterator data_it(HEAP->old_data_space());
- for (HeapObject* object = data_it.Next();
- object != NULL; object = data_it.Next())
- object->Iterate(&v);
- }
+ HeapObjectIterator data_it(HEAP->old_data_space());
+ for (HeapObject* object = data_it.next();
+ object != NULL; object = data_it.next())
+ object->Iterate(&v);
}
#endif
@@ -923,64 +940,6 @@ static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
}
-void Heap::ScavengeStoreBufferCallback(
- Heap* heap,
- MemoryChunk* page,
- StoreBufferEvent event) {
- heap->store_buffer_rebuilder_.Callback(page, event);
-}
-
-
-void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
- if (event == kStoreBufferStartScanningPagesEvent) {
- start_of_current_page_ = NULL;
- current_page_ = NULL;
- } else if (event == kStoreBufferScanningPageEvent) {
- if (current_page_ != NULL) {
- // If this page already overflowed the store buffer during this iteration.
- if (current_page_->scan_on_scavenge()) {
- // Then we should wipe out the entries that have been added for it.
- store_buffer_->SetTop(start_of_current_page_);
- } else if (store_buffer_->Top() - start_of_current_page_ >=
- (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
- // Did we find too many pointers in the previous page? The heuristic is
- // that no page can take more then 1/5 the remaining slots in the store
- // buffer.
- current_page_->set_scan_on_scavenge(true);
- store_buffer_->SetTop(start_of_current_page_);
- } else {
- // In this case the page we scanned took a reasonable number of slots in
- // the store buffer. It has now been rehabilitated and is no longer
- // marked scan_on_scavenge.
- ASSERT(!current_page_->scan_on_scavenge());
- }
- }
- start_of_current_page_ = store_buffer_->Top();
- current_page_ = page;
- } else if (event == kStoreBufferFullEvent) {
- // The current page overflowed the store buffer again. Wipe out its entries
- // in the store buffer and mark it scan-on-scavenge again. This may happen
- // several times while scanning.
- if (current_page_ == NULL) {
- // Store Buffer overflowed while scanning promoted objects. These are not
- // in any particular page, though they are likely to be clustered by the
- // allocation routines.
- store_buffer_->HandleFullness();
- } else {
- // Store Buffer overflowed while scanning a particular old space page for
- // pointers to new space.
- ASSERT(current_page_ == page);
- ASSERT(page != NULL);
- current_page_->set_scan_on_scavenge(true);
- ASSERT(start_of_current_page_ != store_buffer_->Top());
- store_buffer_->SetTop(start_of_current_page_);
- }
- } else {
- UNREACHABLE();
- }
-}
-
-
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@@ -988,6 +947,22 @@ void Heap::Scavenge() {
gc_state_ = SCAVENGE;
+ SwitchScavengingVisitorsTableIfProfilingWasEnabled();
+
+ Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
+#ifdef DEBUG
+ VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
+ VerifyPageWatermarkValidity(map_space_, ALL_VALID);
+#endif
+
+ // We do not update an allocation watermark of the top page during linear
+ // allocation to avoid overhead. So to maintain the watermark invariant
+ // we have to manually cache the watermark and mark the top page as having an
+ // invalid watermark. This guarantees that dirty regions iteration will use a
+ // correct watermark even if a linear allocation happens.
+ old_pointer_space_->FlushTopPageWatermark();
+ map_space_->FlushTopPageWatermark();
+
// Implements Cheney's copying algorithm
LOG(isolate_, ResourceEvent("scavenge", "begin"));
@@ -999,13 +974,6 @@ void Heap::Scavenge() {
CheckNewSpaceExpansionCriteria();
- SelectScavengingVisitorsTable();
-
- incremental_marking()->PrepareForScavenge();
-
- old_pointer_space()->AdvanceSweeper(new_space_.Size());
- old_data_space()->AdvanceSweeper(new_space_.Size());
-
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_.Flip();
@@ -1028,29 +996,32 @@ void Heap::Scavenge() {
// for the addresses of promoted objects: every object promoted
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
- Address new_space_front = new_space_.ToSpaceStart();
- promotion_queue_.Initialize(new_space_.ToSpaceEnd());
-
-#ifdef DEBUG
- store_buffer()->Clean();
-#endif
+ Address new_space_front = new_space_.ToSpaceLow();
+ promotion_queue_.Initialize(new_space_.ToSpaceHigh());
+ is_safe_to_read_maps_ = false;
ScavengeVisitor scavenge_visitor(this);
// Copy roots.
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
- // Copy objects reachable from the old generation.
- {
- StoreBufferRebuildScope scope(this,
- store_buffer(),
- &ScavengeStoreBufferCallback);
- store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
- }
+ // Copy objects reachable from the old generation. By definition,
+ // there are no intergenerational pointers in code or data spaces.
+ IterateDirtyRegions(old_pointer_space_,
+ &Heap::IteratePointersInDirtyRegion,
+ &ScavengePointer,
+ WATERMARK_CAN_BE_INVALID);
+
+ IterateDirtyRegions(map_space_,
+ &IteratePointersInDirtyMapsRegion,
+ &ScavengePointer,
+ WATERMARK_CAN_BE_INVALID);
+
+ lo_space_->IterateDirtyRegions(&ScavengePointer);
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
- for (HeapObject* cell = cell_iterator.Next();
- cell != NULL; cell = cell_iterator.Next()) {
+ for (HeapObject* cell = cell_iterator.next();
+ cell != NULL; cell = cell_iterator.next()) {
if (cell->IsJSGlobalPropertyCell()) {
Address value_address =
reinterpret_cast<Address>(cell) +
@@ -1075,16 +1046,14 @@ void Heap::Scavenge() {
LiveObjectList::UpdateReferencesForScavengeGC();
isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
- incremental_marking()->UpdateMarkingDequeAfterScavenge();
ASSERT(new_space_front == new_space_.top());
+ is_safe_to_read_maps_ = true;
+
// Set age mark.
new_space_.set_age_mark(new_space_.top());
- new_space_.LowerInlineAllocationLimit(
- new_space_.inline_allocation_limit_step());
-
// Update how much has survived scavenge.
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
@@ -1143,56 +1112,35 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
}
-void Heap::UpdateReferencesInExternalStringTable(
- ExternalStringTableUpdaterCallback updater_func) {
-
- // Update old space string references.
- if (external_string_table_.old_space_strings_.length() > 0) {
- Object** start = &external_string_table_.old_space_strings_[0];
- Object** end = start + external_string_table_.old_space_strings_.length();
- for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
- }
-
- UpdateNewSpaceReferencesInExternalStringTable(updater_func);
-}
-
-
static Object* ProcessFunctionWeakReferences(Heap* heap,
Object* function,
WeakObjectRetainer* retainer) {
- Object* undefined = heap->undefined_value();
- Object* head = undefined;
+ Object* head = heap->undefined_value();
JSFunction* tail = NULL;
Object* candidate = function;
- while (candidate != undefined) {
+ while (candidate != heap->undefined_value()) {
// Check whether to keep the candidate in the list.
JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
Object* retain = retainer->RetainAs(candidate);
if (retain != NULL) {
- if (head == undefined) {
+ if (head == heap->undefined_value()) {
// First element in the list.
- head = retain;
+ head = candidate_function;
} else {
// Subsequent elements in the list.
ASSERT(tail != NULL);
- tail->set_next_function_link(retain);
+ tail->set_next_function_link(candidate_function);
}
// Retained function is new tail.
- candidate_function = reinterpret_cast<JSFunction*>(retain);
tail = candidate_function;
-
- ASSERT(retain->IsUndefined() || retain->IsJSFunction());
-
- if (retain == undefined) break;
}
-
// Move to next element in the list.
candidate = candidate_function->next_function_link();
}
// Terminate the list if there is one or more elements.
if (tail != NULL) {
- tail->set_next_function_link(undefined);
+ tail->set_next_function_link(heap->undefined_value());
}
return head;
@@ -1200,32 +1148,28 @@ static Object* ProcessFunctionWeakReferences(Heap* heap,
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
- Object* undefined = undefined_value();
- Object* head = undefined;
+ Object* head = undefined_value();
Context* tail = NULL;
Object* candidate = global_contexts_list_;
- while (candidate != undefined) {
+ while (candidate != undefined_value()) {
// Check whether to keep the candidate in the list.
Context* candidate_context = reinterpret_cast<Context*>(candidate);
Object* retain = retainer->RetainAs(candidate);
if (retain != NULL) {
- if (head == undefined) {
+ if (head == undefined_value()) {
// First element in the list.
- head = retain;
+ head = candidate_context;
} else {
// Subsequent elements in the list.
ASSERT(tail != NULL);
tail->set_unchecked(this,
Context::NEXT_CONTEXT_LINK,
- retain,
+ candidate_context,
UPDATE_WRITE_BARRIER);
}
// Retained context is new tail.
- candidate_context = reinterpret_cast<Context*>(retain);
tail = candidate_context;
- if (retain == undefined) break;
-
// Process the weak list of optimized functions for the context.
Object* function_list_head =
ProcessFunctionWeakReferences(
@@ -1237,7 +1181,6 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
function_list_head,
UPDATE_WRITE_BARRIER);
}
-
// Move to next element in the list.
candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
}
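
The ProcessWeakReferences hunk above rebuilds the weak context list with a head/tail pair, splicing in only the entries the retainer keeps. A standalone model of that filter over a plain singly linked list, with nullptr standing in for the undefined sentinel and Retain for WeakObjectRetainer::RetainAs:

struct Node { Node* next; };

Node* Retain(Node* n);  // stand-in: return n to keep it, nullptr to drop it

static Node* FilterWeakListSketch(Node* list) {
  Node* head = nullptr;
  Node* tail = nullptr;
  for (Node* candidate = list; candidate != nullptr; ) {
    Node* next = candidate->next;        // read before any relinking
    if (Retain(candidate) != nullptr) {
      if (head == nullptr) {
        head = candidate;                // first retained element
      } else {
        tail->next = candidate;          // splice after the previous survivor
      }
      tail = candidate;
    }
    candidate = next;
  }
  if (tail != nullptr) tail->next = nullptr;  // terminate the rebuilt list
  return head;
}
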
@@ -1269,45 +1212,35 @@ class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
Address new_space_front) {
do {
- SemiSpace::AssertValidRange(new_space_front, new_space_.top());
+ ASSERT(new_space_front <= new_space_.top());
+
// The addresses new_space_front and new_space_.top() define a
// queue of unprocessed copied objects. Process them until the
// queue is empty.
- while (new_space_front != new_space_.top()) {
- if (!NewSpacePage::IsAtEnd(new_space_front)) {
- HeapObject* object = HeapObject::FromAddress(new_space_front);
- new_space_front +=
- NewSpaceScavenger::IterateBody(object->map(), object);
- } else {
- new_space_front =
- NewSpacePage::FromLimit(new_space_front)->next_page()->body();
- }
+ while (new_space_front < new_space_.top()) {
+ HeapObject* object = HeapObject::FromAddress(new_space_front);
+ new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
}
// Promote and process all the to-be-promoted objects.
- {
- StoreBufferRebuildScope scope(this,
- store_buffer(),
- &ScavengeStoreBufferCallback);
- while (!promotion_queue()->is_empty()) {
- HeapObject* target;
- int size;
- promotion_queue()->remove(&target, &size);
-
- // Promoted object might be already partially visited
- // during old space pointer iteration. Thus we search specificly
- // for pointers to from semispace instead of looking for pointers
- // to new space.
- ASSERT(!target->IsMap());
- IterateAndMarkPointersToFromSpace(target->address(),
- target->address() + size,
- &ScavengeObject);
- }
+ while (!promotion_queue_.is_empty()) {
+ HeapObject* target;
+ int size;
+ promotion_queue_.remove(&target, &size);
+
+ // Promoted object might be already partially visited
+ // during dirty regions iteration. Thus we search specificly
+ // for pointers to from semispace instead of looking for pointers
+ // to new space.
+ ASSERT(!target->IsMap());
+ IterateAndMarkPointersToFromSpace(target->address(),
+ target->address() + size,
+ &ScavengePointer);
}
// Take another spin if there are now unswept objects in new space
// (there are currently no more unswept promoted objects).
- } while (new_space_front != new_space_.top());
+ } while (new_space_front < new_space_.top());
return new_space_front;
}
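
The DoScavenge hunk above restores the simpler Cheney-style drain: scan the copied objects between new_space_front and top, then empty the promotion queue, and repeat until neither phase produces new work. A standalone model of that two-phase loop over plain work lists (hypothetical names, std::vector in place of the semispace and promotion queue):

#include <cstddef>
#include <vector>

struct Obj { /* ... */ };

// Visiting an object may append newly copied objects to `copied` and newly
// promoted objects to `promoted`, which is why the outer loop repeats.
void VisitCopied(Obj* o, std::vector<Obj*>* copied, std::vector<Obj*>* promoted);
void VisitPromoted(Obj* o, std::vector<Obj*>* copied, std::vector<Obj*>* promoted);

static void DrainSketch(std::vector<Obj*>* copied, std::vector<Obj*>* promoted) {
  size_t front = 0;  // plays the role of new_space_front
  do {
    // Phase 1: process every unscanned copied object up to the current top.
    while (front < copied->size()) {
      VisitCopied((*copied)[front++], copied, promoted);
    }
    // Phase 2: process objects promoted while scanning.
    while (!promoted->empty()) {
      Obj* o = promoted->back();
      promoted->pop_back();
      VisitPromoted(o, copied, promoted);
    }
    // Take another spin if phase 2 copied more objects into new space.
  } while (front < copied->size());
}
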
@@ -1319,11 +1252,26 @@ enum LoggingAndProfiling {
};
-enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
+typedef void (*ScavengingCallback)(Map* map,
+ HeapObject** slot,
+ HeapObject* object);
+
+
+static Atomic32 scavenging_visitors_table_mode_;
+static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+
+INLINE(static void DoScavengeObject(Map* map,
+ HeapObject** slot,
+ HeapObject* obj));
-template<MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
+void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+ scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+}
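// --- Illustrative aside (not part of this diff) --- DoScavengeObject above
// dispatches through a table indexed by the map's visitor id. A tiny model
// with invented types:
#include <cstddef>

typedef void (*ToyCallback)(void** slot, void* object);

struct ToyDispatchTable {
  ToyCallback callbacks_[16];
  ToyCallback Get(size_t visitor_id) const { return callbacks_[visitor_id]; }
};

static void ToyScavenge(const ToyDispatchTable& table,
                        size_t visitor_id,  // derived from the map
                        void** slot,
                        void* object) {
  table.Get(visitor_id)(slot, object);  // same shape as GetVisitor(map)(...)
}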
+
+
+template<LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
public:
static void Initialize() {
@@ -1358,13 +1306,9 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
- if (marks_handling == IGNORE_MARKS) {
- table_.Register(kVisitJSFunction,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<JSFunction::kSize>);
- } else {
- table_.Register(kVisitJSFunction, &EvacuateJSFunction);
- }
+ table_.Register(kVisitJSFunction,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ template VisitSpecialized<JSFunction::kSize>);
table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
kVisitDataObject,
@@ -1429,15 +1373,10 @@ class ScavengingVisitor : public StaticVisitorBase {
}
}
- if (marks_handling == TRANSFER_MARKS) {
- if (Marking::TransferColor(source, target)) {
- MemoryChunk::IncrementLiveBytes(target->address(), size);
- }
- }
-
return target;
}
+
template<ObjectContents object_contents, SizeRestriction size_restriction>
static inline void EvacuateObject(Map* map,
HeapObject** slot,
@@ -1447,14 +1386,13 @@ class ScavengingVisitor : public StaticVisitorBase {
(object_size <= Page::kMaxHeapObjectSize));
ASSERT(object->Size() == object_size);
- Heap* heap = map->GetHeap();
+ Heap* heap = map->heap();
if (heap->ShouldBePromoted(object->address(), object_size)) {
MaybeObject* maybe_result;
if ((size_restriction != SMALL) &&
(object_size > Page::kMaxHeapObjectSize)) {
- maybe_result = heap->lo_space()->AllocateRaw(object_size,
- NOT_EXECUTABLE);
+ maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
} else {
if (object_contents == DATA_OBJECT) {
maybe_result = heap->old_data_space()->AllocateRaw(object_size);
@@ -1476,36 +1414,13 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
}
- MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
- Object* result = allocation->ToObjectUnchecked();
-
+ Object* result =
+ heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
*slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
return;
}
- static inline void EvacuateJSFunction(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<JSFunction::kSize>(map, slot, object);
-
- HeapObject* target = *slot;
- MarkBit mark_bit = Marking::MarkBitFrom(target);
- if (Marking::IsBlack(mark_bit)) {
- // This object is black and it might not be rescanned by marker.
- // We should explicitly record code entry slot for compaction because
- // promotion queue processing (IterateAndMarkPointersToFromSpace) will
- // miss it as it is not HeapObject-tagged.
- Address code_entry_slot =
- target->address() + JSFunction::kCodeEntryOffset;
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
- map->GetHeap()->mark_compact_collector()->
- RecordCodeEntrySlot(code_entry_slot, code);
- }
- }
-
-
static inline void EvacuateFixedArray(Map* map,
HeapObject** slot,
HeapObject* object) {
@@ -1564,17 +1479,14 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
ASSERT(IsShortcutCandidate(map->instance_type()));
- Heap* heap = map->GetHeap();
-
- if (marks_handling == IGNORE_MARKS &&
- ConsString::cast(object)->unchecked_second() ==
- heap->empty_string()) {
+ if (ConsString::cast(object)->unchecked_second() ==
+ map->heap()->empty_string()) {
HeapObject* first =
HeapObject::cast(ConsString::cast(object)->unchecked_first());
*slot = first;
- if (!heap->InNewSpace(first)) {
+ if (!map->heap()->InNewSpace(first)) {
object->set_map_word(MapWord::FromForwardingAddress(first));
return;
}
@@ -1588,7 +1500,7 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
- heap->DoScavengeObject(first->map(), slot, first);
+ DoScavengeObject(first->map(), slot, first);
object->set_map_word(MapWord::FromForwardingAddress(*slot));
return;
}
@@ -1619,60 +1531,45 @@ class ScavengingVisitor : public StaticVisitorBase {
};
-template<MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
+template<LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
- ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
+ ScavengingVisitor<logging_and_profiling_mode>::table_;
static void InitializeScavengingVisitorsTables() {
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
+ ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
+ scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
}
-void Heap::SelectScavengingVisitorsTable() {
- bool logging_and_profiling =
- isolate()->logger()->is_logging() ||
+void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
+ if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
+ // Table was already updated by some isolate.
+ return;
+ }
+
+  if (isolate()->logger()->is_logging() ||
CpuProfiler::is_profiling(isolate()) ||
(isolate()->heap_profiler() != NULL &&
- isolate()->heap_profiler()->is_profiling());
-
- if (!incremental_marking()->IsMarking()) {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
- } else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
- } else {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
- } else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
+ isolate()->heap_profiler()->is_profiling())) {
+    // If one of the isolates is doing a scavenge at this moment, it might
+    // see this table in an inconsistent state when some of the callbacks
+    // point to ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
+    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
+    // However, this does not lead to any bugs because such an isolate does
+    // not have profiling enabled, and any isolate with profiling enabled is
+    // guaranteed to see the table in a consistent state.
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
- if (incremental_marking()->IsCompacting()) {
- // When compacting forbid short-circuiting of cons-strings.
- // Scavenging code relies on the fact that new space object
- // can't be evacuated into evacuation candidate but
- // short-circuiting violates this assumption.
- scavenging_visitors_table_.Register(
- StaticVisitorBase::kVisitShortcutCandidate,
- scavenging_visitors_table_.GetVisitorById(
- StaticVisitorBase::kVisitConsString));
- }
+ // We use Release_Store to prevent reordering of this write before writes
+ // to the table.
+ Release_Store(&scavenging_visitors_table_mode_,
+ LOGGING_AND_PROFILING_ENABLED);
}
}
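// --- Illustrative aside (not part of this diff) --- The Release_Store above
// follows the usual publication pattern: write the payload (the table) first,
// then set the mode flag with release semantics, so a reader that observes
// the flag also observes the completed table copy. A modern-C++ sketch of
// that pattern, with invented names:
#include <atomic>

struct ToyTable { int callbacks[4]; };

static ToyTable g_toy_table;            // stands in for the dispatch table
static std::atomic<int> g_toy_mode(0);  // stands in for ..._table_mode_

static void PublishProfilingTable(const ToyTable& profiling_table) {
  g_toy_table = profiling_table;                  // 1. update the payload
  g_toy_mode.store(1, std::memory_order_release); // 2. then publish the flag
}

static const ToyTable* TryGetPublishedTable() {
  // An acquire load of the flag pairs with the release store above.
  return g_toy_mode.load(std::memory_order_acquire) == 1 ? &g_toy_table
                                                         : nullptr;
}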
@@ -1682,7 +1579,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
MapWord first_word = object->map_word();
ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
- map->GetHeap()->DoScavengeObject(map, p, object);
+ DoScavengeObject(map, p, object);
}
@@ -1708,9 +1605,7 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
}
-MaybeObject* Heap::AllocateMap(InstanceType instance_type,
- int instance_size,
- ElementsKind elements_kind) {
+MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
Object* result;
{ MaybeObject* maybe_result = AllocateRawMap();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -1732,7 +1627,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
- map->set_elements_kind(elements_kind);
+ map->set_elements_kind(FAST_ELEMENTS);
// If the map object is aligned fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
@@ -1812,19 +1707,12 @@ bool Heap::CreateInitialMaps() {
}
set_empty_fixed_array(FixedArray::cast(obj));
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
+ { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_null_value(Oddball::cast(obj));
+ set_null_value(obj);
Oddball::cast(obj)->set_kind(Oddball::kNull);
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undefined_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kUndefined);
- ASSERT(!InNewSpace(undefined_value()));
-
// Allocate the empty descriptor array.
{ MaybeObject* maybe_obj = AllocateEmptyFixedArray();
if (!maybe_obj->ToObject(&obj)) return false;
@@ -1910,12 +1798,6 @@ bool Heap::CreateInitialMaps() {
}
set_byte_array_map(Map::cast(obj));
- { MaybeObject* maybe_obj =
- AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_free_space_map(Map::cast(obj));
-
{ MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2116,7 +1998,7 @@ MaybeObject* Heap::CreateOddball(const char* to_string,
Object* to_number,
byte kind) {
Object* result;
- { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
+ { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
return Oddball::cast(result)->Initialize(to_string, to_number, kind);
@@ -2129,13 +2011,7 @@ bool Heap::CreateApiObjects() {
{ MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
- // Don't use Smi-only elements optimizations for objects with the neander
- // map. There are too many cases where element values are set directly with a
- // bottleneck to trap the Smi-only -> fast elements transition, and there
- // appears to be no benefit for optimize this case.
- Map* new_neander_map = Map::cast(obj);
- new_neander_map->set_elements_kind(FAST_ELEMENTS);
- set_neander_map(new_neander_map);
+ set_neander_map(Map::cast(obj));
{ MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2180,12 +2056,6 @@ void Heap::CreateFixedStubs() {
// To workaround the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
-
- // Create stubs that should be there, so we don't unexpectedly have to
- // create them if we need them during the creation of another stub.
- // Stub creation mixes raw pointers and handles in an unsafe manner so
- // we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime();
}
@@ -2196,18 +2066,20 @@ bool Heap::CreateInitialObjects() {
{ MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_minus_zero_value(HeapNumber::cast(obj));
+ set_minus_zero_value(obj);
ASSERT(signbit(minus_zero_value()->Number()) != 0);
{ MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_nan_value(HeapNumber::cast(obj));
+ set_nan_value(obj);
- { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
+ { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_infinity_value(HeapNumber::cast(obj));
+ set_undefined_value(obj);
+ Oddball::cast(obj)->set_kind(Oddball::kUndefined);
+ ASSERT(!InNewSpace(undefined_value()));
// Allocate initial symbol table.
{ MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
@@ -2216,17 +2088,19 @@ bool Heap::CreateInitialObjects() {
// Don't use set_symbol_table() due to asserts.
roots_[kSymbolTableRootIndex] = obj;
- // Finish initializing oddballs after creating symboltable.
- { MaybeObject* maybe_obj =
- undefined_value()->Initialize("undefined",
- nan_value(),
- Oddball::kUndefined);
- if (!maybe_obj->ToObject(&obj)) return false;
+  // Assign the print strings for oddballs after creating the symbol table.
+ Object* symbol;
+ { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
+ if (!maybe_symbol->ToObject(&symbol)) return false;
}
+ Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
+ Oddball::cast(undefined_value())->set_to_number(nan_value());
- // Initialize the null_value.
+ // Allocate the null_value
{ MaybeObject* maybe_obj =
- null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
+ Oddball::cast(null_value())->Initialize("null",
+ Smi::FromInt(0),
+ Oddball::kNull);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2235,51 +2109,43 @@ bool Heap::CreateInitialObjects() {
Oddball::kTrue);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_true_value(Oddball::cast(obj));
+ set_true_value(obj);
{ MaybeObject* maybe_obj = CreateOddball("false",
Smi::FromInt(0),
Oddball::kFalse);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_false_value(Oddball::cast(obj));
+ set_false_value(obj);
{ MaybeObject* maybe_obj = CreateOddball("hole",
Smi::FromInt(-1),
Oddball::kTheHole);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_the_hole_value(Oddball::cast(obj));
+ set_the_hole_value(obj);
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
- Smi::FromInt(-2),
+ Smi::FromInt(-4),
Oddball::kArgumentMarker);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_arguments_marker(Oddball::cast(obj));
+ set_arguments_marker(obj);
{ MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
- Smi::FromInt(-3),
+ Smi::FromInt(-2),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_no_interceptor_result_sentinel(obj);
{ MaybeObject* maybe_obj = CreateOddball("termination_exception",
- Smi::FromInt(-4),
+ Smi::FromInt(-3),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_termination_exception(obj);
- { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
- Smi::FromInt(-5),
- Oddball::kOther);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_frame_alignment_marker(Oddball::cast(obj));
- STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
-
// Allocate the empty string.
{ MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2556,15 +2422,6 @@ MaybeObject* Heap::NumberToString(Object* number,
}
-MaybeObject* Heap::Uint32ToString(uint32_t value,
- bool check_number_string_cache) {
- Object* number;
- MaybeObject* maybe = NumberFromUint32(value);
- if (!maybe->To<Object>(&number)) return maybe;
- return NumberToString(number, check_number_string_cache);
-}
-
-
Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}
@@ -2880,23 +2737,25 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
// Make an attempt to flatten the buffer to reduce access time.
buffer = buffer->TryFlattenGetString();
+  // TODO(1626): For now slicing external strings is not supported. However,
+  // a flat cons string can have an external string as its first part in some
+  // cases. Therefore we have to single out this case as well.
if (!FLAG_string_slices ||
- !buffer->IsFlat() ||
+ (buffer->IsConsString() &&
+ (!buffer->IsFlat() ||
+ !ConsString::cast(buffer)->first()->IsSeqString())) ||
+ buffer->IsExternalString() ||
length < SlicedString::kMinLength ||
pretenure == TENURED) {
Object* result;
- // WriteToFlat takes care of the case when an indirect string has a
- // different encoding from its underlying string. These encodings may
- // differ because of externalization.
- bool is_ascii = buffer->IsAsciiRepresentation();
- { MaybeObject* maybe_result = is_ascii
- ? AllocateRawAsciiString(length, pretenure)
- : AllocateRawTwoByteString(length, pretenure);
+ { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
+ ? AllocateRawAsciiString(length, pretenure)
+ : AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
String* string_result = String::cast(result);
// Copy the characters into the new object.
- if (is_ascii) {
+ if (buffer->IsAsciiRepresentation()) {
ASSERT(string_result->IsAsciiRepresentation());
char* dest = SeqAsciiString::cast(string_result)->GetChars();
String::WriteToFlat(buffer, dest, start, end);
@@ -2909,17 +2768,12 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
}
ASSERT(buffer->IsFlat());
+ ASSERT(!buffer->IsExternalString());
#if DEBUG
buffer->StringVerify();
#endif
Object* result;
- // When slicing an indirect string we use its encoding for a newly created
- // slice and don't check the encoding of the underlying string. This is safe
- // even if the encodings are different because of externalization. If an
- // indirect ASCII string is pointing to a two-byte string, the two-byte char
- // codes of the underlying string must still fit into ASCII (because
- // externalization must not change char codes).
{ Map* map = buffer->IsAsciiRepresentation()
? sliced_ascii_string_map()
: sliced_string_map();
@@ -2945,14 +2799,13 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
sliced_string->set_parent(buffer);
sliced_string->set_offset(start);
}
- ASSERT(sliced_string->parent()->IsSeqString() ||
- sliced_string->parent()->IsExternalString());
+ ASSERT(sliced_string->parent()->IsSeqString());
return result;
}
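// --- Illustrative aside (not part of this diff) --- The branch above falls
// back to copying characters rather than allocating a SlicedString. A
// hypothetical condensation of that decision (struct and names invented):
struct ToySubStringRequest {
  bool slices_enabled;          // FLAG_string_slices
  bool is_cons;                 // buffer->IsConsString()
  bool is_flat_with_seq_first;  // flat cons whose first part is sequential
  bool is_external;             // buffer->IsExternalString()
  int length;                   // end - start
  bool tenured;                 // pretenure == TENURED
};

static bool ShouldCopyInsteadOfSlice(const ToySubStringRequest& r,
                                     int min_slice_length) {
  return !r.slices_enabled ||
         (r.is_cons && !r.is_flat_with_seq_first) ||
         r.is_external ||
         r.length < min_slice_length ||
         r.tenured;
}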
MaybeObject* Heap::AllocateExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource) {
+ ExternalAsciiString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
isolate()->context()->mark_out_of_memory();
@@ -2975,7 +2828,7 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
MaybeObject* Heap::AllocateExternalStringFromTwoByte(
- const ExternalTwoByteString::Resource* resource) {
+ ExternalTwoByteString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
isolate()->context()->mark_out_of_memory();
@@ -3039,7 +2892,7 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Object* result;
{ MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
? old_data_space_->AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
+ : lo_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -3075,8 +2928,8 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
} else if (size == 2 * kPointerSize) {
filler->set_map(two_pointer_filler_map());
} else {
- filler->set_map(free_space_map());
- FreeSpace::cast(filler)->set_size(size);
+ filler->set_map(byte_array_map());
+ ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
}
}
@@ -3122,7 +2975,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
// Large code objects and code objects which should stay at a fixed address
// are allocated in large object space.
if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
- maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+ maybe_result = lo_space_->AllocateRawCode(obj_size);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
@@ -3167,7 +3020,7 @@ MaybeObject* Heap::CopyCode(Code* code) {
int obj_size = code->Size();
MaybeObject* maybe_result;
if (obj_size > MaxObjectSizeInPagedSpace()) {
- maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+ maybe_result = lo_space_->AllocateRawCode(obj_size);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
@@ -3210,7 +3063,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
MaybeObject* maybe_result;
if (new_obj_size > MaxObjectSizeInPagedSpace()) {
- maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
+ maybe_result = lo_space_->AllocateRawCode(new_obj_size);
} else {
maybe_result = code_space_->AllocateRaw(new_obj_size);
}
@@ -3259,9 +3112,9 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
}
-void Heap::InitializeFunction(JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype) {
+MaybeObject* Heap::InitializeFunction(JSFunction* function,
+ SharedFunctionInfo* shared,
+ Object* prototype) {
ASSERT(!prototype->IsMap());
function->initialize_properties();
function->initialize_elements();
@@ -3271,6 +3124,7 @@ void Heap::InitializeFunction(JSFunction* function,
function->set_context(undefined_value());
function->set_literals(empty_fixed_array());
function->set_next_function_link(undefined_value());
+ return function;
}
@@ -3280,18 +3134,8 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
// different context.
JSFunction* object_function =
function->context()->global_context()->object_function();
-
- // Each function prototype gets a copy of the object function map.
- // This avoid unwanted sharing of maps between prototypes of different
- // constructors.
- Map* new_map;
- ASSERT(object_function->has_initial_map());
- { MaybeObject* maybe_map =
- object_function->initial_map()->CopyDropTransitions();
- if (!maybe_map->To<Map>(&new_map)) return maybe_map;
- }
Object* prototype;
- { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
+ { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
// When creating the prototype for the function we must set its
@@ -3316,8 +3160,7 @@ MaybeObject* Heap::AllocateFunction(Map* function_map,
{ MaybeObject* maybe_result = Allocate(function_map, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- InitializeFunction(JSFunction::cast(result), shared, prototype);
- return result;
+ return InitializeFunction(JSFunction::cast(result), shared, prototype);
}
@@ -3487,9 +3330,6 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
// We cannot always fill with one_pointer_filler_map because objects
// created from API functions expect their internal fields to be initialized
// with undefined_value.
- // Pre-allocated fields need to be initialized with undefined_value as well
- // so that object accesses before the constructor completes (e.g. in the
- // debugger) will not cause a crash.
if (map->constructor()->IsJSFunction() &&
JSFunction::cast(map->constructor())->shared()->
IsInobjectSlackTrackingInProgress()) {
@@ -3499,7 +3339,7 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
} else {
filler = Heap::undefined_value();
}
- obj->InitializeBody(map, Heap::undefined_value(), filler);
+ obj->InitializeBody(map->instance_size(), filler);
}
@@ -3537,8 +3377,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
- ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
- JSObject::cast(obj)->HasFastElements());
+ ASSERT(JSObject::cast(obj)->HasFastElements());
return obj;
}
@@ -3581,7 +3420,6 @@ MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
result->InitializeBody(map->instance_size(), Smi::FromInt(0));
result->set_handler(handler);
- result->set_hash(undefined_value());
return result;
}
@@ -3605,7 +3443,6 @@ MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
result->InitializeBody(map->instance_size(), Smi::FromInt(0));
result->set_handler(handler);
- result->set_hash(undefined_value());
result->set_call_trap(call_trap);
result->set_construct_trap(construct_trap);
return result;
@@ -3722,7 +3559,6 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
object_size);
}
- ASSERT(JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
@@ -3755,13 +3591,13 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
MaybeObject* Heap::ReinitializeJSReceiver(
JSReceiver* object, InstanceType type, int size) {
- ASSERT(type >= FIRST_JS_OBJECT_TYPE);
+ ASSERT(type >= FIRST_JS_RECEIVER_TYPE);
// Allocate fresh map.
// TODO(rossberg): Once we optimize proxies, cache these maps.
Map* map;
- MaybeObject* maybe = AllocateMap(type, size);
- if (!maybe->To<Map>(&map)) return maybe;
+ MaybeObject* maybe_map_obj = AllocateMap(type, size);
+ if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
// Check that the receiver has at least the size of the fresh object.
int size_difference = object->map()->instance_size() - map->instance_size();
@@ -3772,35 +3608,30 @@ MaybeObject* Heap::ReinitializeJSReceiver(
// Allocate the backing storage for the properties.
int prop_size = map->unused_property_fields() - map->inobject_properties();
Object* properties;
- maybe = AllocateFixedArray(prop_size, TENURED);
- if (!maybe->ToObject(&properties)) return maybe;
-
- // Functions require some allocation, which might fail here.
- SharedFunctionInfo* shared = NULL;
- if (type == JS_FUNCTION_TYPE) {
- String* name;
- maybe = LookupAsciiSymbol("<freezing call trap>");
- if (!maybe->To<String>(&name)) return maybe;
- maybe = AllocateSharedFunctionInfo(name);
- if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
+ { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
+ if (!maybe_properties->ToObject(&properties)) return maybe_properties;
}
- // Because of possible retries of this function after failure,
- // we must NOT fail after this point, where we have changed the type!
-
// Reset the map for the object.
object->set_map(map);
- JSObject* jsobj = JSObject::cast(object);
// Reinitialize the object from the constructor map.
- InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
+ InitializeJSObjectFromMap(JSObject::cast(object),
+ FixedArray::cast(properties), map);
// Functions require some minimal initialization.
if (type == JS_FUNCTION_TYPE) {
- map->set_function_with_prototype(true);
- InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
- JSFunction::cast(object)->set_context(
- isolate()->context()->global_context());
+ String* name;
+ MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>");
+ if (!maybe_name->To<String>(&name)) return maybe_name;
+ SharedFunctionInfo* shared;
+ MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name);
+ if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared;
+ JSFunction* func;
+ MaybeObject* maybe_func =
+ InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
+ if (!maybe_func->To<JSFunction>(&func)) return maybe_func;
+ func->set_context(isolate()->context()->global_context());
}
// Put in filler if the new object is smaller than the old.
@@ -3983,7 +3814,7 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
+ ? lo_space_->AllocateRaw(size)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -4100,7 +3931,7 @@ MaybeObject* Heap::AllocateRawFixedArray(int length) {
int size = FixedArray::SizeFor(length);
return size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
+ : lo_space_->AllocateRawFixedArray(size);
}
@@ -4431,21 +4262,6 @@ STRUCT_LIST(MAKE_CASE)
}
-bool Heap::IsHeapIterable() {
- return (!old_pointer_space()->was_swept_conservatively() &&
- !old_data_space()->was_swept_conservatively());
-}
-
-
-void Heap::EnsureHeapIsIterable() {
- ASSERT(IsAllocationAllowed());
- if (!IsHeapIterable()) {
- CollectAllGarbage(kMakeHeapIterableMask);
- }
- ASSERT(IsHeapIterable());
-}
-
-
bool Heap::IdleNotification() {
static const int kIdlesBeforeScavenge = 4;
static const int kIdlesBeforeMarkSweep = 7;
@@ -4476,7 +4292,7 @@ bool Heap::IdleNotification() {
if (number_idle_notifications_ == kIdlesBeforeScavenge) {
if (contexts_disposed_ > 0) {
HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(false);
} else {
CollectGarbage(NEW_SPACE);
}
@@ -4488,12 +4304,12 @@ bool Heap::IdleNotification() {
// generated code for cached functions.
isolate_->compilation_cache()->Clear();
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(false);
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
} else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(true);
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
number_idle_notifications_ = 0;
@@ -4503,7 +4319,7 @@ bool Heap::IdleNotification() {
contexts_disposed_ = 0;
} else {
HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(false);
last_idle_notification_gc_count_ = gc_count_;
}
// If this is the first idle notification, we reset the
@@ -4523,11 +4339,8 @@ bool Heap::IdleNotification() {
// Make sure that we have no pending context disposals and
// conditionally uncommit from space.
- // Take into account that we might have decided to delay full collection
- // because incremental marking is in progress.
- ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
+ ASSERT(contexts_disposed_ == 0);
if (uncommit) UncommitFromSpace();
-
return finished;
}
@@ -4561,11 +4374,11 @@ void Heap::ReportHeapStatistics(const char* title) {
USE(title);
PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
title, gc_count_);
+ PrintF("mark-compact GC : %d\n", mc_count_);
PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
old_gen_promotion_limit_);
PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
old_gen_allocation_limit_);
- PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -4642,18 +4455,69 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
#ifdef DEBUG
+static void DummyScavengePointer(HeapObject** p) {
+}
+
+
+static void VerifyPointersUnderWatermark(
+ PagedSpace* space,
+ DirtyRegionCallback visit_dirty_region) {
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+ while (it.has_next()) {
+ Page* page = it.next();
+ Address start = page->ObjectAreaStart();
+ Address end = page->AllocationWatermark();
+
+ HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
+ start,
+ end,
+ visit_dirty_region,
+ &DummyScavengePointer);
+ }
+}
+
+
+static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
+ LargeObjectIterator it(space);
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+ if (object->IsFixedArray()) {
+ Address slot_address = object->address();
+ Address end = object->address() + object->Size();
+
+ while (slot_address < end) {
+ HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+        // When we are not in a GC, the Heap::InNewSpace() predicate
+        // checks that pointers which satisfy the predicate point into
+        // the active semispace.
+ HEAP->InNewSpace(*slot);
+ slot_address += kPointerSize;
+ }
+ }
+ }
+}
+
+
void Heap::Verify() {
ASSERT(HasBeenSetup());
- store_buffer()->Verify();
-
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);
new_space_.Verify();
- old_pointer_space_->Verify(&visitor);
- map_space_->Verify(&visitor);
+ VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
+ old_pointer_space_->Verify(&dirty_regions_visitor);
+ map_space_->Verify(&dirty_regions_visitor);
+
+ VerifyPointersUnderWatermark(old_pointer_space_,
+ &IteratePointersInDirtyRegion);
+ VerifyPointersUnderWatermark(map_space_,
+ &IteratePointersInDirtyMapsRegion);
+ VerifyPointersUnderWatermark(lo_space_);
+
+ VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
+ VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
VerifyPointersVisitor no_dirty_regions_visitor;
old_data_space_->Verify(&no_dirty_regions_visitor);
@@ -4662,7 +4526,6 @@ void Heap::Verify() {
lo_space_->Verify();
}
-
#endif // DEBUG
@@ -4758,221 +4621,275 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
#ifdef DEBUG
void Heap::ZapFromSpace() {
- NewSpacePageIterator it(new_space_.FromSpaceStart(),
- new_space_.FromSpaceEnd());
- while (it.has_next()) {
- NewSpacePage* page = it.next();
- for (Address cursor = page->body(), limit = page->body_limit();
- cursor < limit;
- cursor += kPointerSize) {
- Memory::Address_at(cursor) = kFromSpaceZapValue;
- }
+ ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
+ for (Address a = new_space_.FromSpaceLow();
+ a < new_space_.FromSpaceHigh();
+ a += kPointerSize) {
+ Memory::Address_at(a) = kFromSpaceZapValue;
}
}
#endif // DEBUG
-void Heap::IterateAndMarkPointersToFromSpace(Address start,
- Address end,
- ObjectSlotCallback callback) {
+bool Heap::IteratePointersInDirtyRegion(Heap* heap,
+ Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
Address slot_address = start;
-
- // We are not collecting slots on new space objects during mutation
- // thus we have to scan for pointers to evacuation candidates when we
- // promote objects. But we should not record any slots in non-black
- // objects. Grey object's slots would be rescanned.
- // White object might not survive until the end of collection
- // it would be a violation of the invariant to record it's slots.
- bool record_slots = false;
- if (incremental_marking()->IsCompacting()) {
- MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
- record_slots = Marking::IsBlack(mark_bit);
- }
+ bool pointers_to_new_space_found = false;
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* object = *slot;
- // If the store buffer becomes overfull we mark pages as being exempt from
- // the store buffer. These pages are scanned to find pointers that point
- // to the new space. In that case we may hit newly promoted objects and
- // fix the pointers before the promotion queue gets to them. Thus the 'if'.
- if (object->IsHeapObject()) {
- if (Heap::InFromSpace(object)) {
- callback(reinterpret_cast<HeapObject**>(slot),
- HeapObject::cast(object));
- Object* new_object = *slot;
- if (InNewSpace(new_object)) {
- ASSERT(Heap::InToSpace(new_object));
- ASSERT(new_object->IsHeapObject());
- store_buffer_.EnterDirectlyIntoStoreBuffer(
- reinterpret_cast<Address>(slot));
- }
- ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
- } else if (record_slots &&
- MarkCompactCollector::IsOnEvacuationCandidate(object)) {
- mark_compact_collector()->RecordSlot(slot, slot, object);
+ if (heap->InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ copy_object_func(reinterpret_cast<HeapObject**>(slot));
+ if (heap->InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ pointers_to_new_space_found = true;
}
}
slot_address += kPointerSize;
}
+ return pointers_to_new_space_found;
}
-#ifdef DEBUG
-typedef bool (*CheckStoreBufferFilter)(Object** addr);
+// Compute the start address of the first map following the given addr.
+static inline Address MapStartAlign(Address addr) {
+ Address page = Page::FromAddress(addr)->ObjectAreaStart();
+ return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
+}
-bool IsAMapPointerAddress(Object** addr) {
- uintptr_t a = reinterpret_cast<uintptr_t>(addr);
- int mod = a % Map::kSize;
- return mod >= Map::kPointerFieldsBeginOffset &&
- mod < Map::kPointerFieldsEndOffset;
+// Compute the end address of the first map preceding the given addr.
+static inline Address MapEndAlign(Address addr) {
+ Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+ return page + ((addr - page) / Map::kSize * Map::kSize);
}
-bool EverythingsAPointer(Object** addr) {
- return true;
-}
+static bool IteratePointersInDirtyMaps(Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
+ ASSERT(MapStartAlign(start) == start);
+ ASSERT(MapEndAlign(end) == end);
+ Address map_address = start;
+ bool pointers_to_new_space_found = false;
-static void CheckStoreBuffer(Heap* heap,
- Object** current,
- Object** limit,
- Object**** store_buffer_position,
- Object*** store_buffer_top,
- CheckStoreBufferFilter filter,
- Address special_garbage_start,
- Address special_garbage_end) {
- Map* free_space_map = heap->free_space_map();
- for ( ; current < limit; current++) {
- Object* o = *current;
- Address current_address = reinterpret_cast<Address>(current);
- // Skip free space.
- if (o == free_space_map) {
- Address current_address = reinterpret_cast<Address>(current);
- FreeSpace* free_space =
- FreeSpace::cast(HeapObject::FromAddress(current_address));
- int skip = free_space->Size();
- ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
- ASSERT(skip > 0);
- current_address += skip - kPointerSize;
- current = reinterpret_cast<Object**>(current_address);
- continue;
- }
- // Skip the current linear allocation space between top and limit which is
- // unmarked with the free space map, but can contain junk.
- if (current_address == special_garbage_start &&
- special_garbage_end != special_garbage_start) {
- current_address = special_garbage_end - kPointerSize;
- current = reinterpret_cast<Object**>(current_address);
- continue;
- }
- if (!(*filter)(current)) continue;
- ASSERT(current_address < special_garbage_start ||
- current_address >= special_garbage_end);
- ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
- // We have to check that the pointer does not point into new space
- // without trying to cast it to a heap object since the hash field of
- // a string can contain values like 1 and 3 which are tagged null
- // pointers.
- if (!heap->InNewSpace(o)) continue;
- while (**store_buffer_position < current &&
- *store_buffer_position < store_buffer_top) {
- (*store_buffer_position)++;
- }
- if (**store_buffer_position != current ||
- *store_buffer_position == store_buffer_top) {
- Object** obj_start = current;
- while (!(*obj_start)->IsMap()) obj_start--;
- UNREACHABLE();
+ Heap* heap = HEAP;
+ while (map_address < end) {
+ ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
+ ASSERT(Memory::Object_at(map_address)->IsMap());
+
+ Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
+ Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
+
+ if (Heap::IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)) {
+ pointers_to_new_space_found = true;
}
+
+ map_address += Map::kSize;
}
+
+ return pointers_to_new_space_found;
}
-// Check that the store buffer contains all intergenerational pointers by
-// scanning a page and ensuring that all pointers to young space are in the
-// store buffer.
-void Heap::OldPointerSpaceCheckStoreBuffer() {
- OldSpace* space = old_pointer_space();
- PageIterator pages(space);
+bool Heap::IteratePointersInDirtyMapsRegion(
+ Heap* heap,
+ Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
+ Address map_aligned_start = MapStartAlign(start);
+ Address map_aligned_end = MapEndAlign(end);
- store_buffer()->SortUniq();
+ bool contains_pointers_to_new_space = false;
- while (pages.has_next()) {
- Page* page = pages.next();
- Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+ if (map_aligned_start != start) {
+ Address prev_map = map_aligned_start - Map::kSize;
+ ASSERT(Memory::Object_at(prev_map)->IsMap());
- Address end = page->ObjectAreaEnd();
+ Address pointer_fields_start =
+ Max(start, prev_map + Map::kPointerFieldsBeginOffset);
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
+ Address pointer_fields_end =
+ Min(prev_map + Map::kPointerFieldsEndOffset, end);
- Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &EverythingsAPointer,
- space->top(),
- space->limit());
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
}
+
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyMaps(map_aligned_start,
+ map_aligned_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
+
+ if (map_aligned_end != end) {
+ ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
+
+ Address pointer_fields_start =
+ map_aligned_end + Map::kPointerFieldsBeginOffset;
+
+ Address pointer_fields_end =
+ Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
+
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
+ }
+
+ return contains_pointers_to_new_space;
}
-void Heap::MapSpaceCheckStoreBuffer() {
- MapSpace* space = map_space();
- PageIterator pages(space);
+void Heap::IterateAndMarkPointersToFromSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback) {
+ Address slot_address = start;
+ Page* page = Page::FromAddress(start);
- store_buffer()->SortUniq();
+ uint32_t marks = page->GetRegionMarks();
- while (pages.has_next()) {
- Page* page = pages.next();
- Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+ while (slot_address < end) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ if (InFromSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ callback(reinterpret_cast<HeapObject**>(slot));
+ if (InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ marks |= page->GetRegionMaskForAddress(slot_address);
+ }
+ }
+ slot_address += kPointerSize;
+ }
+
+ page->SetRegionMarks(marks);
+}
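// --- Illustrative aside (not part of this diff) --- The marks accumulated
// above form a per-page bitmap with one bit per region. A toy version of
// "set the bit of the region containing this slot", assuming a 256-byte
// region size:
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kToyRegionSize = 0x100;  // assumed, for illustration only
  uintptr_t page_start = 0x4000;
  uintptr_t slot = 0x4234;  // hypothetical slot holding a new-space pointer
  uint32_t marks = 0;
  marks |= 1u << ((slot - page_start) / kToyRegionSize);
  assert(marks == (1u << 2));  // the slot lies in the third region
  return 0;
}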
- Address end = page->ObjectAreaEnd();
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
+uint32_t Heap::IterateDirtyRegions(
+ uint32_t marks,
+ Address area_start,
+ Address area_end,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback copy_object_func) {
+ uint32_t newmarks = 0;
+ uint32_t mask = 1;
- Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &IsAMapPointerAddress,
- space->top(),
- space->limit());
+ if (area_start >= area_end) {
+ return newmarks;
}
+
+ Address region_start = area_start;
+
+  // area_start does not necessarily coincide with the start of the first
+  // region. Thus, to calculate the beginning of the next region we have to
+  // align area_start to Page::kRegionSize.
+ Address second_region =
+ reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
+ ~Page::kRegionAlignmentMask);
+
+ // Next region might be beyond area_end.
+ Address region_end = Min(second_region, area_end);
+
+ if (marks & mask) {
+ if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
+ newmarks |= mask;
+ }
+ }
+ mask <<= 1;
+
+ // Iterate subsequent regions which fully lay inside [area_start, area_end[.
+ region_start = region_end;
+ region_end = region_start + Page::kRegionSize;
+
+ while (region_end <= area_end) {
+ if (marks & mask) {
+ if (visit_dirty_region(this,
+ region_start,
+ region_end,
+ copy_object_func)) {
+ newmarks |= mask;
+ }
+ }
+
+ region_start = region_end;
+ region_end = region_start + Page::kRegionSize;
+
+ mask <<= 1;
+ }
+
+ if (region_start != area_end) {
+    // A small piece of the area is left unvisited because area_end does not
+    // coincide with a region end. Check whether the region covering the last
+    // part of the area is dirty.
+ if (marks & mask) {
+ if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
+ newmarks |= mask;
+ }
+ }
+ }
+
+ return newmarks;
}
-void Heap::LargeObjectSpaceCheckStoreBuffer() {
- LargeObjectIterator it(lo_space());
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- // We only have code, sequential strings, or fixed arrays in large
- // object space, and only fixed arrays can possibly contain pointers to
- // the young generation.
- if (object->IsFixedArray()) {
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
- Object** current = reinterpret_cast<Object**>(object->address());
- Object** limit =
- reinterpret_cast<Object**>(object->address() + object->Size());
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &EverythingsAPointer,
- NULL,
- NULL);
+
+void Heap::IterateDirtyRegions(
+ PagedSpace* space,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback copy_object_func,
+ ExpectedPageWatermarkState expected_page_watermark_state) {
+
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+ while (it.has_next()) {
+ Page* page = it.next();
+ uint32_t marks = page->GetRegionMarks();
+
+ if (marks != Page::kAllRegionsCleanMarks) {
+ Address start = page->ObjectAreaStart();
+
+      // Do not try to visit pointers beyond the page allocation watermark.
+      // The page can contain garbage pointers there.
+ Address end;
+
+ if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
+ page->IsWatermarkValid()) {
+ end = page->AllocationWatermark();
+ } else {
+ end = page->CachedAllocationWatermark();
+ }
+
+ ASSERT(space == old_pointer_space_ ||
+ (space == map_space_ &&
+ ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
+
+ page->SetRegionMarks(IterateDirtyRegions(marks,
+ start,
+ end,
+ visit_dirty_region,
+ copy_object_func));
}
+
+ // Mark page watermark as invalid to maintain watermark validity invariant.
+ // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
+ page->InvalidateWatermark(true);
}
}
-#endif
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
@@ -5024,7 +4941,8 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
// on scavenge collections.
- if (mode != VISIT_ALL_IN_SCAVENGE) {
+ if (mode != VISIT_ALL_IN_SCAVENGE &&
+ mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
isolate_->builtins()->IterateBuiltins(v);
}
v->Synchronize("builtins");
@@ -5068,20 +4986,11 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semispace_size,
- intptr_t max_old_gen_size,
- intptr_t max_executable_size) {
+ int max_old_gen_size,
+ int max_executable_size) {
if (HasBeenSetup()) return false;
- if (max_semispace_size > 0) {
- if (max_semispace_size < Page::kPageSize) {
- max_semispace_size = Page::kPageSize;
- if (FLAG_trace_gc) {
- PrintF("Max semispace size cannot be less than %dkbytes\n",
- Page::kPageSize >> 10);
- }
- }
- max_semispace_size_ = max_semispace_size;
- }
+ if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
if (Snapshot::IsEnabled()) {
// If we are using a snapshot we always reserve the default amount
@@ -5091,10 +5000,6 @@ bool Heap::ConfigureHeap(int max_semispace_size,
// than the default reserved semispace size.
if (max_semispace_size_ > reserved_semispace_size_) {
max_semispace_size_ = reserved_semispace_size_;
- if (FLAG_trace_gc) {
- PrintF("Max semispace size cannot be more than %dkbytes\n",
- reserved_semispace_size_ >> 10);
- }
}
} else {
// If we are not using snapshots we reserve space for the actual
@@ -5120,12 +5025,8 @@ bool Heap::ConfigureHeap(int max_semispace_size,
initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
external_allocation_limit_ = 10 * max_semispace_size_;
- // The old generation is paged and needs at least one page for each space.
- int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
- max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
- Page::kPageSize),
- RoundUp(max_old_generation_size_,
- Page::kPageSize));
+ // The old generation is paged.
+ max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
configured_ = true;
return true;
@@ -5133,9 +5034,9 @@ bool Heap::ConfigureHeap(int max_semispace_size,
bool Heap::ConfigureHeapDefault() {
- return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
- static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
- static_cast<intptr_t>(FLAG_max_executable_size) * MB);
+ return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
+ FLAG_max_old_space_size * MB,
+ FLAG_max_executable_size * MB);
}
@@ -5163,7 +5064,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->os_error = OS::GetLastError();
isolate()->memory_allocator()->Available();
if (take_snapshot) {
- HeapIterator iterator;
+ HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
@@ -5379,21 +5280,31 @@ bool Heap::Setup(bool create_heap_objects) {
gc_initializer_mutex->Lock();
static bool initialized_gc = false;
if (!initialized_gc) {
- initialized_gc = true;
- InitializeScavengingVisitorsTables();
- NewSpaceScavenger::Initialize();
- MarkCompactCollector::Initialize();
+ initialized_gc = true;
+ InitializeScavengingVisitorsTables();
+ NewSpaceScavenger::Initialize();
+ MarkCompactCollector::Initialize();
}
gc_initializer_mutex->Unlock();
MarkMapPointersAsEncoded(false);
- // Setup memory allocator.
+  // Set up the memory allocator and reserve a chunk of memory for new
+  // space. The chunk is twice the requested reserved new space size to
+  // ensure that we can find a pair of semispaces that are contiguous and
+  // aligned to their size.
if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
return false;
-
- // Setup new space.
- if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
+ void* chunk =
+ isolate_->memory_allocator()->ReserveInitialChunk(
+ 4 * reserved_semispace_size_);
+ if (chunk == NULL) return false;
+
+ // Align the pair of semispaces to their size, which must be a power
+ // of 2.
+ Address new_space_start =
+ RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
+ if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
return false;
}
@@ -5404,7 +5315,7 @@ bool Heap::Setup(bool create_heap_objects) {
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
- if (!old_pointer_space_->Setup()) return false;
+ if (!old_pointer_space_->Setup(NULL, 0)) return false;
// Initialize old data space.
old_data_space_ =
@@ -5413,7 +5324,7 @@ bool Heap::Setup(bool create_heap_objects) {
OLD_DATA_SPACE,
NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
- if (!old_data_space_->Setup()) return false;
+ if (!old_data_space_->Setup(NULL, 0)) return false;
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
@@ -5428,20 +5339,21 @@ bool Heap::Setup(bool create_heap_objects) {
code_space_ =
new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
- if (!code_space_->Setup()) return false;
+ if (!code_space_->Setup(NULL, 0)) return false;
// Initialize map space.
- map_space_ = new MapSpace(this,
- max_old_generation_size_,
- FLAG_max_map_space_pages,
- MAP_SPACE);
+ map_space_ = new MapSpace(this, FLAG_use_big_map_space
+ ? max_old_generation_size_
+ : MapSpace::kMaxMapPageIndex * Page::kPageSize,
+ FLAG_max_map_space_pages,
+ MAP_SPACE);
if (map_space_ == NULL) return false;
- if (!map_space_->Setup()) return false;
+ if (!map_space_->Setup(NULL, 0)) return false;
// Initialize global property cell space.
cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
- if (!cell_space_->Setup()) return false;
+ if (!cell_space_->Setup(NULL, 0)) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
@@ -5449,6 +5361,7 @@ bool Heap::Setup(bool create_heap_objects) {
lo_space_ = new LargeObjectSpace(this, LO_SPACE);
if (lo_space_ == NULL) return false;
if (!lo_space_->Setup()) return false;
+
if (create_heap_objects) {
// Create initial maps.
if (!CreateInitialMaps()) return false;
@@ -5463,8 +5376,6 @@ bool Heap::Setup(bool create_heap_objects) {
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
- store_buffer()->Setup();
-
return true;
}
@@ -5491,6 +5402,7 @@ void Heap::TearDown() {
PrintF("\n\n");
PrintF("gc_count=%d ", gc_count_);
PrintF("mark_sweep_count=%d ", ms_count_);
+ PrintF("mark_compact_count=%d ", mc_count_);
PrintF("max_gc_pause=%d ", get_max_gc_pause());
PrintF("min_in_mutator=%d ", get_min_in_mutator());
PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
@@ -5540,9 +5452,6 @@ void Heap::TearDown() {
lo_space_ = NULL;
}
- store_buffer()->TearDown();
- incremental_marking()->TearDown();
-
isolate_->memory_allocator()->TearDown();
#ifdef DEBUG
@@ -5556,7 +5465,7 @@ void Heap::Shrink() {
// Try to shrink all paged spaces.
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
- space->ReleaseAllUnusedPages();
+ space->Shrink();
}
@@ -5759,6 +5668,45 @@ class HeapObjectsFilter {
};
+class FreeListNodesFilter : public HeapObjectsFilter {
+ public:
+ FreeListNodesFilter() {
+ MarkFreeListNodes();
+ }
+
+ bool SkipObject(HeapObject* object) {
+ if (object->IsMarked()) {
+ object->ClearMark();
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ private:
+ void MarkFreeListNodes() {
+ Heap* heap = HEAP;
+ heap->old_pointer_space()->MarkFreeListNodes();
+ heap->old_data_space()->MarkFreeListNodes();
+ MarkCodeSpaceFreeListNodes(heap);
+ heap->map_space()->MarkFreeListNodes();
+ heap->cell_space()->MarkFreeListNodes();
+ }
+
+ void MarkCodeSpaceFreeListNodes(Heap* heap) {
+ // For code space, using FreeListNode::IsFreeListNode is OK.
+ HeapObjectIterator iter(heap->code_space());
+ for (HeapObject* obj = iter.next_object();
+ obj != NULL;
+ obj = iter.next_object()) {
+ if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
+ }
+ }
+
+ AssertNoAllocation no_alloc;
+};
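// Usage note (drawn from this same diff; see Heap::RecordStats earlier in the
// file): this filter is selected by constructing a HeapIterator with
// HeapIterator::kFilterFreeListNodes, after which next() skips free-list
// nodes:
//
//   HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
//   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
//     // free-list nodes are never returned here
//   }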
+
+
class UnreachableObjectsFilter : public HeapObjectsFilter {
public:
UnreachableObjectsFilter() {
@@ -5766,8 +5714,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
bool SkipObject(HeapObject* object) {
- if (IntrusiveMarking::IsMarked(object)) {
- IntrusiveMarking::ClearMark(object);
+ if (object->IsMarked()) {
+ object->ClearMark();
return true;
} else {
return false;
@@ -5783,8 +5731,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
- if (IntrusiveMarking::IsMarked(obj)) {
- IntrusiveMarking::ClearMark(obj);
+ if (obj->IsMarked()) {
+ obj->ClearMark();
list_.Add(obj);
}
}
@@ -5806,7 +5754,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
- IntrusiveMarking::SetMark(obj);
+ obj->SetMark();
}
UnmarkingVisitor visitor;
HEAP->IterateRoots(&visitor, VISIT_ALL);
@@ -5840,11 +5788,10 @@ HeapIterator::~HeapIterator() {
void HeapIterator::Init() {
// Start the iteration.
space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
- new SpaceIterator(Isolate::Current()->heap()->
- GcSafeSizeOfOldObjectFunction());
+ new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
switch (filtering_) {
case kFilterFreeListNodes:
- // TODO(gc): Not handled.
+ filter_ = new FreeListNodesFilter;
break;
case kFilterUnreachable:
filter_ = new UnreachableObjectsFilter;
@@ -5981,11 +5928,6 @@ void PathTracer::TracePathFrom(Object** root) {
}
-static bool SafeIsGlobalContext(HeapObject* obj) {
- return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
-}
-
-
void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
if (!(*p)->IsHeapObject()) return;
@@ -6004,7 +5946,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
return;
}
- bool is_global_context = SafeIsGlobalContext(obj);
+ bool is_global_context = obj->IsGlobalContext();
// not visited yet
Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -6112,7 +6054,7 @@ static intptr_t CountTotalHolesSize() {
for (OldSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
- holes_size += space->Waste() + space->Available();
+ holes_size += space->Waste() + space->AvailableFree();
}
return holes_size;
}
@@ -6123,10 +6065,17 @@ GCTracer::GCTracer(Heap* heap)
start_size_(0),
gc_count_(0),
full_gc_count_(0),
+ is_compacting_(false),
+ marked_count_(0),
allocated_since_last_gc_(0),
spent_in_mutator_(0),
promoted_objects_size_(0),
heap_(heap) {
+ // These two fields reflect the state of the previous full collection.
+ // Set them before they are changed by the collector.
+ previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
+ previous_marked_count_ =
+ heap_->mark_compact_collector_.previous_marked_count();
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
start_time_ = OS::TimeCurrentMillis();
start_size_ = heap_->SizeOfObjects();
@@ -6143,14 +6092,6 @@ GCTracer::GCTracer(Heap* heap)
if (heap_->last_gc_end_timestamp_ > 0) {
spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
}
-
- steps_count_ = heap_->incremental_marking()->steps_count();
- steps_took_ = heap_->incremental_marking()->steps_took();
- longest_step_ = heap_->incremental_marking()->longest_step();
- steps_count_since_last_gc_ =
- heap_->incremental_marking()->steps_count_since_last_gc();
- steps_took_since_last_gc_ =
- heap_->incremental_marking()->steps_took_since_last_gc();
}
@@ -6185,21 +6126,7 @@ GCTracer::~GCTracer() {
SizeOfHeapObjects());
if (external_time > 0) PrintF("%d / ", external_time);
- PrintF("%d ms", time);
- if (steps_count_ > 0) {
- if (collector_ == SCAVENGER) {
- PrintF(" (+ %d ms in %d steps since last GC)",
- static_cast<int>(steps_took_since_last_gc_),
- steps_count_since_last_gc_);
- } else {
- PrintF(" (+ %d ms in %d steps since start of marking, "
- "biggest step %f ms)",
- static_cast<int>(steps_took_),
- steps_count_,
- longest_step_);
- }
- }
- PrintF(".\n");
+ PrintF("%d ms.\n", time);
} else {
PrintF("pause=%d ", time);
PrintF("mutator=%d ",
@@ -6211,7 +6138,8 @@ GCTracer::~GCTracer() {
PrintF("s");
break;
case MARK_COMPACTOR:
- PrintF("ms");
+ PrintF("%s",
+ heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
break;
default:
UNREACHABLE();
@@ -6233,14 +6161,6 @@ GCTracer::~GCTracer() {
PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
- if (collector_ == SCAVENGER) {
- PrintF("stepscount=%d ", steps_count_since_last_gc_);
- PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
- } else {
- PrintF("stepscount=%d ", steps_count_);
- PrintF("stepstook=%d ", static_cast<int>(steps_took_));
- }
-
PrintF("\n");
}
@@ -6253,7 +6173,8 @@ const char* GCTracer::CollectorString() {
case SCAVENGER:
return "Scavenge";
case MARK_COMPACTOR:
- return "Mark-sweep";
+ return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
+ : "Mark-sweep";
}
return "Unknown GC";
}
@@ -6360,52 +6281,4 @@ void ExternalStringTable::TearDown() {
}
-void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
- chunk->set_next_chunk(chunks_queued_for_free_);
- chunks_queued_for_free_ = chunk;
-}
-
-
-void Heap::FreeQueuedChunks() {
- if (chunks_queued_for_free_ == NULL) return;
- MemoryChunk* next;
- MemoryChunk* chunk;
- for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
- next = chunk->next_chunk();
- chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
-
- if (chunk->owner()->identity() == LO_SPACE) {
- // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
- // If FromAnyPointerAddress encounters a slot that belongs to a large
- // chunk queued for deletion it will fail to find the chunk because
- // it try to perform a search in the list of pages owned by of the large
- // object space and queued chunks were detached from that list.
- // To work around this we split large chunk into normal kPageSize aligned
- // pieces and initialize owner field and flags of every piece.
- // If FromAnyPointerAddress encounters a slot that belongs to one of
- // these smaller pieces it will treat it as a slot on a normal Page.
- MemoryChunk* inner = MemoryChunk::FromAddress(
- chunk->address() + Page::kPageSize);
- MemoryChunk* inner_last = MemoryChunk::FromAddress(
- chunk->address() + chunk->size() - 1);
- while (inner <= inner_last) {
- // Size of a large chunk is always a multiple of
- // OS::AllocationAlignment() so there is always
- // enough space for a fake MemoryChunk header.
- inner->set_owner(lo_space());
- inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
- inner = MemoryChunk::FromAddress(
- inner->address() + Page::kPageSize);
- }
- }
- }
- isolate_->heap()->store_buffer()->Compact();
- isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
- for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
- next = chunk->next_chunk();
- isolate_->memory_allocator()->Free(chunk);
- }
- chunks_queued_for_free_ = NULL;
-}
-
} } // namespace v8::internal
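
The two filters in the heap.cc hunk above (the new FreeListNodesFilter and the reworked UnreachableObjectsFilter) share one contract: mark every object that should be hidden while the filter is constructed, then let SkipObject() consume that mark during iteration. A self-contained sketch of that contract in plain C++, outside V8 (ToyObject and ToyFilter are hypothetical names used only for illustration):

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-ins for illustration only -- not V8 types.
    struct ToyObject {
      bool marked;
      int payload;
    };

    // Same contract as the filters above: the constructor marks every object
    // the iteration should hide, and SkipObject() consumes that mark.
    class ToyFilter {
     public:
      explicit ToyFilter(std::vector<ToyObject>& heap) {
        for (ToyObject& obj : heap) {
          if (obj.payload % 2 != 0) obj.marked = true;  // hide odd payloads
        }
      }

      bool SkipObject(ToyObject& obj) {
        if (obj.marked) {
          obj.marked = false;  // clear the mark, as the filters above do
          return true;
        }
        return false;
      }
    };

    int main() {
      std::vector<ToyObject> heap = {{false, 1}, {false, 2}, {false, 3}, {false, 4}};
      ToyFilter filter(heap);
      for (ToyObject& obj : heap) {
        if (filter.SkipObject(obj)) continue;           // filtered out
        std::printf("visit object %d\n", obj.payload);  // prints 2 and 4
      }
      return 0;
    }

In the patch itself, HeapIterator::Init() wires kFilterFreeListNodes and kFilterUnreachable to the two concrete filters; the iteration loop that actually calls SkipObject() is not part of this hunk.
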
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 6fb2d18c24..d81ff6cad5 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -32,15 +32,11 @@
#include "allocation.h"
#include "globals.h"
-#include "incremental-marking.h"
#include "list.h"
#include "mark-compact.h"
-#include "objects-visiting.h"
#include "spaces.h"
#include "splay-tree-inl.h"
-#include "store-buffer.h"
#include "v8-counters.h"
-#include "v8globals.h"
namespace v8 {
namespace internal {
@@ -52,20 +48,20 @@ inline Heap* _inline_get_heap_();
// Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V) \
+#define STRONG_ROOT_LIST(V) \
+ /* Put the byte array map early. We need it to be in place by the time */ \
+ /* the deserializer hits the next page, since it wants to put a byte */ \
+ /* array in the unused space at the end of the page. */ \
V(Map, byte_array_map, ByteArrayMap) \
- V(Map, free_space_map, FreeSpaceMap) \
V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
/* Cluster the most popular ones in a few cache lines here at the top. */ \
- V(Smi, store_buffer_top, StoreBufferTop) \
- V(Oddball, undefined_value, UndefinedValue) \
- V(Oddball, the_hole_value, TheHoleValue) \
- V(Oddball, null_value, NullValue) \
- V(Oddball, true_value, TrueValue) \
- V(Oddball, false_value, FalseValue) \
- V(Oddball, arguments_marker, ArgumentsMarker) \
- V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \
+ V(Object, undefined_value, UndefinedValue) \
+ V(Object, the_hole_value, TheHoleValue) \
+ V(Object, null_value, NullValue) \
+ V(Object, true_value, TrueValue) \
+ V(Object, false_value, FalseValue) \
+ V(Object, arguments_marker, ArgumentsMarker) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
@@ -126,9 +122,8 @@ inline Heap* _inline_get_heap_();
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \
- V(HeapNumber, nan_value, NanValue) \
- V(HeapNumber, infinity_value, InfinityValue) \
- V(HeapNumber, minus_zero_value, MinusZeroValue) \
+ V(Object, nan_value, NanValue) \
+ V(Object, minus_zero_value, MinusZeroValue) \
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
V(Foreign, prototype_accessors, PrototypeAccessors) \
@@ -231,9 +226,7 @@ inline Heap* _inline_get_heap_();
V(closure_symbol, "(closure)") \
V(use_strict, "use strict") \
V(dot_symbol, ".") \
- V(anonymous_function_symbol, "(anonymous function)") \
- V(infinity_symbol, "Infinity") \
- V(minus_infinity_symbol, "-Infinity")
+ V(anonymous_function_symbol, "(anonymous function)")
// Forward declarations.
class GCTracer;
@@ -245,26 +238,10 @@ class WeakObjectRetainer;
typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
Object** pointer);
-class StoreBufferRebuilder {
- public:
- explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
- : store_buffer_(store_buffer) {
- }
-
- void Callback(MemoryChunk* page, StoreBufferEvent event);
-
- private:
- StoreBuffer* store_buffer_;
-
- // We record in this variable how full the store buffer was when we started
- // iterating over the current page, finding pointers to new space. If the
- // store buffer overflows again we can exempt the page from the store buffer
- // by rewinding to this point instead of having to search the store buffer.
- Object*** start_of_current_page_;
- // The current page we are scanning in the store buffer iterator.
- MemoryChunk* current_page_;
-};
-
+typedef bool (*DirtyRegionCallback)(Heap* heap,
+ Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func);
// The all static Heap captures the interface to the global object heap.
@@ -282,37 +259,22 @@ class PromotionQueue {
PromotionQueue() : front_(NULL), rear_(NULL) { }
void Initialize(Address start_address) {
- // Assumes that a NewSpacePage exactly fits a number of promotion queue
- // entries (where each is a pair of intptr_t). This allows us to simplify
- // the test for when to switch pages.
- ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
- == 0);
- ASSERT(NewSpacePage::IsAtEnd(start_address));
front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
}
- bool is_empty() { return front_ == rear_; }
+ bool is_empty() { return front_ <= rear_; }
inline void insert(HeapObject* target, int size);
void remove(HeapObject** target, int* size) {
- ASSERT(!is_empty());
- if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
- NewSpacePage* front_page =
- NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
- ASSERT(!front_page->prev_page()->is_anchor());
- front_ =
- reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
- }
*target = reinterpret_cast<HeapObject*>(*(--front_));
*size = static_cast<int>(*(--front_));
// Assert no underflow.
- SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
- reinterpret_cast<Address>(front_));
+ ASSERT(front_ >= rear_);
}
private:
- // The front of the queue is higher in the memory page chain than the rear.
+ // The front of the queue is higher in memory than the rear.
intptr_t* front_;
intptr_t* rear_;
@@ -320,11 +282,6 @@ class PromotionQueue {
};
-typedef void (*ScavengingCallback)(Map* map,
- HeapObject** slot,
- HeapObject* object);
-
-
// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
@@ -370,8 +327,8 @@ class Heap {
// Configure heap size before setup. Return false if the heap has been
// setup already.
bool ConfigureHeap(int max_semispace_size,
- intptr_t max_old_gen_size,
- intptr_t max_executable_size);
+ int max_old_gen_size,
+ int max_executable_size);
bool ConfigureHeapDefault();
// Initializes the global object heap. If create_heap_objects is true,
@@ -499,7 +456,6 @@ class Heap {
// size, but keeping the original prototype. The receiver must have at least
// the size of the new object. The object is reinitialized and behaves as an
// object that has been freshly allocated.
- // Returns failure if an error occurred, otherwise the object.
MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
InstanceType type,
int size);
@@ -528,10 +484,8 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateMap(
- InstanceType instance_type,
- int instance_size,
- ElementsKind elements_kind = FAST_ELEMENTS);
+ MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
+ int instance_size);
// Allocates a partial map for bootstrapping.
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@@ -842,9 +796,9 @@ class Heap {
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource);
+ ExternalAsciiString::Resource* resource);
MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
- const ExternalTwoByteString::Resource* resource);
+ ExternalTwoByteString::Resource* resource);
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
@@ -931,24 +885,13 @@ class Heap {
// collect more garbage.
inline bool CollectGarbage(AllocationSpace space);
- static const int kNoGCFlags = 0;
- static const int kMakeHeapIterableMask = 1;
-
- // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
- // non-zero, then the slower precise sweeper is used, which leaves the heap
- // in a state where we can iterate over the heap visiting all objects.
- void CollectAllGarbage(int flags);
+ // Performs a full garbage collection. Force compaction if the
+ // parameter is true.
+ void CollectAllGarbage(bool force_compaction);
// Last hope GC, should try to squeeze as much as possible.
void CollectAllAvailableGarbage();
- // Check whether the heap is currently iterable.
- bool IsHeapIterable();
-
- // Ensure that we have swept all spaces in such a way that we can iterate
- // over all objects. May cause a GC.
- void EnsureHeapIsIterable();
-
// Notify the heap that a context has been disposed.
int NotifyContextDisposed() { return ++contexts_disposed_; }
@@ -956,20 +899,6 @@ class Heap {
// ensure correct callback for weak global handles.
void PerformScavenge();
- inline void increment_scan_on_scavenge_pages() {
- scan_on_scavenge_pages_++;
- if (FLAG_gc_verbose) {
- PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
- }
- }
-
- inline void decrement_scan_on_scavenge_pages() {
- scan_on_scavenge_pages_--;
- if (FLAG_gc_verbose) {
- PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
- }
- }
-
PromotionQueue* promotion_queue() { return &promotion_queue_; }
#ifdef DEBUG
@@ -996,8 +925,6 @@ class Heap {
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
- // TODO(1490): Try removing the unchecked accessors, now that GC marking does
- // not corrupt the stack.
#define ROOT_ACCESSOR(type, name, camel_name) \
type* name() { \
return type::cast(roots_[k##camel_name##RootIndex]); \
@@ -1031,9 +958,6 @@ class Heap {
}
Object* global_contexts_list() { return global_contexts_list_; }
- // Number of mark-sweeps.
- int ms_count() { return ms_count_; }
-
// Iterates over all roots in the heap.
void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
@@ -1041,16 +965,60 @@ class Heap {
// Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
+ enum ExpectedPageWatermarkState {
+ WATERMARK_SHOULD_BE_VALID,
+ WATERMARK_CAN_BE_INVALID
+ };
+
+ // For each dirty region on a page in use from an old space call
+ // visit_dirty_region callback.
+ // If either visit_dirty_region or callback can cause an allocation
+ // in old space and change the allocation watermark, then
+ // can_preallocate_during_iteration should be set to true.
+ // All pages will be marked as having invalid watermark upon
+ // iteration completion.
+ void IterateDirtyRegions(
+ PagedSpace* space,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback callback,
+ ExpectedPageWatermarkState expected_page_watermark_state);
+
+ // Interpret marks as a bitvector of dirty marks for regions of size
+ // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
+ // memory interval from start to top. For each dirty region call a
+ // visit_dirty_region callback. Return updated bitvector of dirty marks.
+ uint32_t IterateDirtyRegions(uint32_t marks,
+ Address start,
+ Address end,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback callback);
+
// Iterate pointers to from semispace of new space found in memory interval
// from start to end.
+ // Update dirty marks for page containing start address.
void IterateAndMarkPointersToFromSpace(Address start,
Address end,
ObjectSlotCallback callback);
+ // Iterate pointers to new space found in memory interval from start to end.
+ // Return true if pointers to new space were found.
+ static bool IteratePointersInDirtyRegion(Heap* heap,
+ Address start,
+ Address end,
+ ObjectSlotCallback callback);
+
+
+ // Iterate pointers to new space found in memory interval from start to end.
+ // This interval is considered to belong to the map space.
+ // Return true if pointers to new space were found.
+ static bool IteratePointersInDirtyMapsRegion(Heap* heap,
+ Address start,
+ Address end,
+ ObjectSlotCallback callback);
+
+
// Returns whether the object resides in new space.
inline bool InNewSpace(Object* object);
- inline bool InNewSpace(Address addr);
- inline bool InNewSpacePage(Address addr);
inline bool InFromSpace(Object* object);
inline bool InToSpace(Object* object);
@@ -1089,20 +1057,12 @@ class Heap {
roots_[kEmptyScriptRootIndex] = script;
}
- void public_set_store_buffer_top(Address* top) {
- roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
- }
-
// Update the next script id.
inline void SetLastScriptId(Object* last_script_id);
// Generated code can embed this address to get access to the roots.
Object** roots_address() { return roots_; }
- Address* store_buffer_top_address() {
- return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
- }
-
// Get address of global contexts list for serialization support.
Object** global_contexts_list_address() {
return &global_contexts_list_;
@@ -1115,10 +1075,6 @@ class Heap {
// Verify the heap is in its normal state before or after a GC.
void Verify();
- void OldPointerSpaceCheckStoreBuffer();
- void MapSpaceCheckStoreBuffer();
- void LargeObjectSpaceCheckStoreBuffer();
-
// Report heap statistics.
void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title);
@@ -1214,53 +1170,22 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
PretenureFlag pretenure);
- inline intptr_t PromotedTotalSize() {
- return PromotedSpaceSize() + PromotedExternalMemorySize();
- }
-
// True if we have reached the allocation limit in the old generation that
// should force the next GC (caused normally) to be a full one.
- inline bool OldGenerationPromotionLimitReached() {
- return PromotedTotalSize() > old_gen_promotion_limit_;
- }
-
- inline intptr_t OldGenerationSpaceAvailable() {
- return old_gen_allocation_limit_ - PromotedTotalSize();
- }
-
- static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
- static const intptr_t kMinimumAllocationLimit =
- 8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
-
- // When we sweep lazily we initially guess that there is no garbage on the
- // heap and set the limits for the next GC accordingly. As we sweep we find
- // out that some of the pages contained garbage and we have to adjust
- // downwards the size of the heap. This means the limits that control the
- // timing of the next GC also need to be adjusted downwards.
- void LowerOldGenLimits(intptr_t adjustment) {
- size_of_old_gen_at_last_old_space_gc_ -= adjustment;
- old_gen_promotion_limit_ =
- OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
- old_gen_allocation_limit_ =
- OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+ bool OldGenerationPromotionLimitReached() {
+ return (PromotedSpaceSize() + PromotedExternalMemorySize())
+ > old_gen_promotion_limit_;
}
- intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
- const int divisor = FLAG_stress_compaction ? 10 : 3;
- intptr_t limit =
- Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
- limit += new_space_.Capacity();
- limit *= old_gen_limit_factor_;
- return limit;
+ intptr_t OldGenerationSpaceAvailable() {
+ return old_gen_allocation_limit_ -
+ (PromotedSpaceSize() + PromotedExternalMemorySize());
}
- intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
- const int divisor = FLAG_stress_compaction ? 8 : 2;
- intptr_t limit =
- Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
- limit += new_space_.Capacity();
- limit *= old_gen_limit_factor_;
- return limit;
+ // True if we have reached the allocation limit in the old generation that
+ // should artificially cause a GC right now.
+ bool OldGenerationAllocationLimitReached() {
+ return OldGenerationSpaceAvailable() < 0;
}
// Can be called when the embedding application is idle.
@@ -1288,8 +1213,6 @@ class Heap {
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true);
- MUST_USE_RESULT MaybeObject* Uint32ToString(
- uint32_t value, bool check_number_string_cache = true);
Map* MapForExternalArrayType(ExternalArrayType array_type);
RootListIndex RootIndexForExternalArrayType(
@@ -1301,10 +1224,18 @@ class Heap {
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
+ inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size);
+
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
static inline void MoveBlock(Address dst, Address src, int byte_size);
+ inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size);
+
// Check new space expansion criteria and expand semispaces if it was hit.
void CheckNewSpaceExpansionCriteria();
@@ -1313,31 +1244,9 @@ class Heap {
survived_since_last_expansion_ += survived;
}
- inline bool NextGCIsLikelyToBeFull() {
- if (FLAG_gc_global) return true;
-
- intptr_t total_promoted = PromotedTotalSize();
-
- intptr_t adjusted_promotion_limit =
- old_gen_promotion_limit_ - new_space_.Capacity();
-
- if (total_promoted >= adjusted_promotion_limit) return true;
-
- intptr_t adjusted_allocation_limit =
- old_gen_allocation_limit_ - new_space_.Capacity() / 5;
-
- if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
-
- return false;
- }
-
-
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
- void UpdateReferencesInExternalStringTable(
- ExternalStringTableUpdaterCallback updater_func);
-
void ProcessWeakReferences(WeakObjectRetainer* retainer);
// Helper function that governs the promotion policy from new space to
@@ -1354,9 +1263,6 @@ class Heap {
GCTracer* tracer() { return tracer_; }
- // Returns the size of objects residing in non new spaces.
- intptr_t PromotedSpaceSize();
-
double total_regexp_code_generated() { return total_regexp_code_generated_; }
void IncreaseTotalRegexpCodeGenerated(int size) {
total_regexp_code_generated_ += size;
@@ -1375,18 +1281,6 @@ class Heap {
return &mark_compact_collector_;
}
- StoreBuffer* store_buffer() {
- return &store_buffer_;
- }
-
- Marking* marking() {
- return &marking_;
- }
-
- IncrementalMarking* incremental_marking() {
- return &incremental_marking_;
- }
-
ExternalStringTable* external_string_table() {
return &external_string_table_;
}
@@ -1397,28 +1291,16 @@ class Heap {
}
inline Isolate* isolate();
+ bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
- inline void CallGlobalGCPrologueCallback() {
+ void CallGlobalGCPrologueCallback() {
if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
}
- inline void CallGlobalGCEpilogueCallback() {
+ void CallGlobalGCEpilogueCallback() {
if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
}
- inline bool OldGenerationAllocationLimitReached();
-
- inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
- scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
- }
-
- void QueueMemoryChunkForFree(MemoryChunk* chunk);
- void FreeQueuedChunks();
-
- // Completely clear the Instanceof cache (to stop it keeping objects alive
- // around a GC).
- inline void CompletelyClearInstanceofCache();
-
private:
Heap();
@@ -1426,12 +1308,12 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_;
- intptr_t code_range_size_;
int reserved_semispace_size_;
int max_semispace_size_;
int initial_semispace_size_;
intptr_t max_old_generation_size_;
intptr_t max_executable_size_;
+ intptr_t code_range_size_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
@@ -1446,8 +1328,6 @@ class Heap {
// For keeping track of context disposals.
int contexts_disposed_;
- int scan_on_scavenge_pages_;
-
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 1024*KB;
#else
@@ -1464,9 +1344,13 @@ class Heap {
HeapState gc_state_;
int gc_post_processing_depth_;
+ // Returns the size of object residing in non new spaces.
+ intptr_t PromotedSpaceSize();
+
// Returns the amount of external memory registered since last global gc.
int PromotedExternalMemorySize();
+ int mc_count_; // how many mark-compact collections happened
int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
@@ -1505,13 +1389,6 @@ class Heap {
// every allocation in large object space.
intptr_t old_gen_allocation_limit_;
- // Sometimes the heuristics dictate that those limits are increased. This
- // variable records that fact.
- int old_gen_limit_factor_;
-
- // Used to adjust the limits that control the timing of the next GC.
- intptr_t size_of_old_gen_at_last_old_space_gc_;
-
// Limit on the amount of externally allocated memory allowed
// between global GCs. If reached a global GC is forced.
intptr_t external_allocation_limit_;
@@ -1531,8 +1408,6 @@ class Heap {
Object* global_contexts_list_;
- StoreBufferRebuilder store_buffer_rebuilder_;
-
struct StringTypeTable {
InstanceType type;
int size;
@@ -1590,11 +1465,13 @@ class Heap {
// Support for computing object sizes during GC.
HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object);
+ static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
// Update the GC state. Called from the mark-compact collector.
void MarkMapPointersAsEncoded(bool encoded) {
- ASSERT(!encoded);
- gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
+ gc_safe_size_of_old_object_ = encoded
+ ? &GcSafeSizeOfOldObjectWithEncodedMap
+ : &GcSafeSizeOfOldObject;
}
// Checks whether a global GC is necessary
@@ -1606,10 +1483,11 @@ class Heap {
bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
+ static const intptr_t kMinimumPromotionLimit = 2 * MB;
+ static const intptr_t kMinimumAllocationLimit = 8 * MB;
inline void UpdateOldSpaceLimits();
-
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
@@ -1644,6 +1522,8 @@ class Heap {
// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
+ void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
+
// Performs a minor collection in new generation.
void Scavenge();
@@ -1652,15 +1532,16 @@ class Heap {
Object** pointer);
Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
- static void ScavengeStoreBufferCallback(Heap* heap,
- MemoryChunk* page,
- StoreBufferEvent event);
// Performs a major collection in the whole heap.
void MarkCompact(GCTracer* tracer);
// Code to be run before and after mark-compact.
- void MarkCompactPrologue();
+ void MarkCompactPrologue(bool is_compacting);
+
+ // Completely clear the Instanceof cache (to stop it keeping objects alive
+ // around a GC).
+ inline void CompletelyClearInstanceofCache();
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
@@ -1670,11 +1551,12 @@ class Heap {
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
// Initializes a function with a shared part and prototype.
+ // Returns the function.
// Note: this code was factored out of AllocateFunction such that
// other parts of the VM could use it. Specifically, a function that creates
// instances of type JS_FUNCTION_TYPE benefits from the use of this function.
// Please note this does not perform a garbage collection.
- inline void InitializeFunction(
+ MUST_USE_RESULT inline MaybeObject* InitializeFunction(
JSFunction* function,
SharedFunctionInfo* shared,
Object* prototype);
@@ -1739,8 +1621,6 @@ class Heap {
return high_survival_rate_period_length_ > 0;
}
- void SelectScavengingVisitorsTable();
-
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
@@ -1760,11 +1640,10 @@ class Heap {
MarkCompactCollector mark_compact_collector_;
- StoreBuffer store_buffer_;
-
- Marking marking_;
-
- IncrementalMarking incremental_marking_;
+ // This field contains the meaning of the WATERMARK_INVALIDATED flag.
+ // Instead of clearing this flag from all pages we just flip
+ // its meaning at the beginning of a scavenge.
+ intptr_t page_watermark_invalidated_mark_;
int number_idle_notifications_;
unsigned int last_idle_notification_gc_count_;
@@ -1779,9 +1658,7 @@ class Heap {
ExternalStringTable external_string_table_;
- VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
-
- MemoryChunk* chunks_queued_for_free_;
+ bool is_safe_to_read_maps_;
friend class Factory;
friend class GCTracer;
@@ -1880,6 +1757,29 @@ class VerifyPointersVisitor: public ObjectVisitor {
}
}
};
+
+
+// Visitor class to verify interior pointers in spaces that use region marks
+// to keep track of intergenerational references.
+// As VerifyPointersVisitor but also checks that dirty marks are set
+// for regions covering intergenerational references.
+class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
+ public:
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ ASSERT(HEAP->Contains(object));
+ ASSERT(object->map()->IsMap());
+ if (HEAP->InNewSpace(object)) {
+ ASSERT(HEAP->InToSpace(object));
+ Address addr = reinterpret_cast<Address>(current);
+ ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
+ }
+ }
+ }
+ }
+};
#endif
@@ -2212,6 +2112,16 @@ class GCTracer BASE_EMBEDDED {
// Sets the full GC count.
void set_full_gc_count(int count) { full_gc_count_ = count; }
+ // Sets the flag that this is a compacting full GC.
+ void set_is_compacting() { is_compacting_ = true; }
+ bool is_compacting() const { return is_compacting_; }
+
+ // Increment and decrement the count of marked objects.
+ void increment_marked_count() { ++marked_count_; }
+ void decrement_marked_count() { --marked_count_; }
+
+ int marked_count() { return marked_count_; }
+
void increment_promoted_objects_size(int object_size) {
promoted_objects_size_ += object_size;
}
@@ -2236,6 +2146,23 @@ class GCTracer BASE_EMBEDDED {
// A count (including this one) of the number of full garbage collections.
int full_gc_count_;
+ // True if the current GC is a compacting full collection, false
+ // otherwise.
+ bool is_compacting_;
+
+ // True if the *previous* full GC was a compacting collection (will be
+ // false if there has not been a previous full GC).
+ bool previous_has_compacted_;
+
+ // On a full GC, a count of the number of marked objects. Incremented
+ // when an object is marked and decremented when an object's mark bit is
+ // cleared. Will be zero on a scavenge collection.
+ int marked_count_;
+
+ // The count from the end of the previous full GC. Will be zero if there
+ // was no previous full GC.
+ int previous_marked_count_;
+
// Amounts of time spent in different scopes during GC.
double scopes_[Scope::kNumberOfScopes];
@@ -2254,13 +2181,6 @@ class GCTracer BASE_EMBEDDED {
// Size of objects promoted during the current collection.
intptr_t promoted_objects_size_;
- // Incremental marking steps counters.
- int steps_count_;
- double steps_took_;
- double longest_step_;
- int steps_count_since_last_gc_;
- double steps_took_since_last_gc_;
-
Heap* heap_;
};
@@ -2372,46 +2292,6 @@ class WeakObjectRetainer {
};
-// Intrusive object marking uses least significant bit of
-// heap object's map word to mark objects.
-// Normally all map words have least significant bit set
-// because they contain tagged map pointer.
-// If the bit is not set object is marked.
-// All objects should be unmarked before resuming
-// JavaScript execution.
-class IntrusiveMarking {
- public:
- static bool IsMarked(HeapObject* object) {
- return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
- }
-
- static void ClearMark(HeapObject* object) {
- uintptr_t map_word = object->map_word().ToRawValue();
- object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
- ASSERT(!IsMarked(object));
- }
-
- static void SetMark(HeapObject* object) {
- uintptr_t map_word = object->map_word().ToRawValue();
- object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
- ASSERT(IsMarked(object));
- }
-
- static Map* MapOfMarkedObject(HeapObject* object) {
- uintptr_t map_word = object->map_word().ToRawValue();
- return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
- }
-
- static int SizeOfMarkedObject(HeapObject* object) {
- return object->SizeFromMap(MapOfMarkedObject(object));
- }
-
- private:
- static const uintptr_t kNotMarkedBit = 0x1;
- STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
-};
-
-
#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
@@ -2470,6 +2350,7 @@ class PathTracer : public ObjectVisitor {
};
#endif // DEBUG || LIVE_OBJECT_LIST
+
} } // namespace v8::internal
#undef HEAP
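
The PromotionQueue restored in the heap.h hunk above keeps each promoted object as a (target, size) pair of words at the high end of to-space: remove() pops the oldest pair by decrementing front_, is_empty() treats front_ <= rear_ as empty, and the matching insert() (only declared here) would push pairs by decrementing rear_. A standalone sketch of that two-pointer scheme over an ordinary array (ToyPromotionQueue is a hypothetical name, not the V8 class):

    #include <cstdint>
    #include <cstdio>

    // Sketch of the downward-growing pair queue: both pointers start at the
    // high end of a buffer, inserts move rear_ down, removes move front_ down.
    class ToyPromotionQueue {
     public:
      void Initialize(intptr_t* end_of_buffer) { front_ = rear_ = end_of_buffer; }

      bool is_empty() const { return front_ <= rear_; }

      void insert(intptr_t target, intptr_t size) {
        *(--rear_) = target;
        *(--rear_) = size;
      }

      void remove(intptr_t* target, intptr_t* size) {
        *target = *(--front_);  // oldest pair sits just below front_
        *size = *(--front_);
      }

     private:
      // The front of the queue is higher in memory than the rear.
      intptr_t* front_ = nullptr;
      intptr_t* rear_ = nullptr;
    };

    int main() {
      intptr_t buffer[16];
      ToyPromotionQueue queue;
      queue.Initialize(buffer + 16);  // start at the high end, grow downwards
      queue.insert(0x1000, 24);
      queue.insert(0x2000, 48);
      while (!queue.is_empty()) {
        intptr_t target, size;
        queue.remove(&target, &size);
        std::printf("promote %lx (%ld bytes)\n",
                    static_cast<unsigned long>(target), static_cast<long>(size));
      }
      return 0;
    }

Because front_ only ever chases rear_ downwards, entries come out in insertion order, which is presumably why the real class gets by with plain pointer comparisons instead of an explicit length field.
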
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index fd0c3bb0d8..5630ce3913 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -707,14 +707,6 @@ void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
}
-void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- HControlInstruction::PrintDataTo(stream);
-}
-
-
void HReturn::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
@@ -785,22 +777,15 @@ void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" == ");
stream->Add(type_literal_->GetFlatContent().ToAsciiVector());
- HControlInstruction::PrintDataTo(stream);
-}
-
-
-void HTypeof::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
}
void HChange::PrintDataTo(StringStream* stream) {
HUnaryOperation::PrintDataTo(stream);
- stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
+ stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
if (CanTruncateToInt32()) stream->Add(" truncating-int32");
if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
- if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
}
@@ -872,23 +857,6 @@ void HCheckFunction::PrintDataTo(StringStream* stream) {
}
-const char* HCheckInstanceType::GetCheckName() {
- switch (check_) {
- case IS_SPEC_OBJECT: return "object";
- case IS_JS_ARRAY: return "array";
- case IS_STRING: return "string";
- case IS_SYMBOL: return "symbol";
- }
- UNREACHABLE();
- return "";
-}
-
-void HCheckInstanceType::PrintDataTo(StringStream* stream) {
- stream->Add("%s ", GetCheckName());
- HUnaryOperation::PrintDataTo(stream);
-}
-
-
void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
@@ -1343,14 +1311,6 @@ void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
- left()->PrintNameTo(stream);
- stream->Add(" ");
- right()->PrintNameTo(stream);
- HControlInstruction::PrintDataTo(stream);
-}
-
-
void HGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", SuccessorAt(0)->block_id());
}
@@ -1465,7 +1425,7 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
}
-bool HLoadKeyedFastElement::RequiresHoleCheck() {
+bool HLoadKeyedFastElement::RequiresHoleCheck() const {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->IsChange()) return true;
@@ -1482,6 +1442,11 @@ void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
}
+bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const {
+ return true;
+}
+
+
void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
@@ -1523,7 +1488,6 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
stream->Add("pixel");
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -1618,7 +1582,6 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
case EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@@ -1635,18 +1598,7 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p]", *cell());
- if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
- if (details_.IsReadOnly()) stream->Add(" (read-only)");
-}
-
-
-bool HLoadGlobalCell::RequiresHoleCheck() {
- if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (!use->IsChange()) return true;
- }
- return false;
+ if (check_hole_value()) stream->Add(" (deleteable/read-only)");
}
@@ -1658,8 +1610,6 @@ void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p] = ", *cell());
value()->PrintNameTo(stream);
- if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
- if (details_.IsReadOnly()) stream->Add(" (read-only)");
}
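
One behavioural change in the hydrogen-instructions.cc hunk above is HChange::DataEquals(): since the constructor sets kUseGVN, this is the equality check value numbering uses to decide whether two conversions of the same value can be commoned; they must agree on the target representation and on the deoptimize-on-undefined flag. A minimal standalone sketch of that style of deduplication (ToyChange is a hypothetical stand-in, not a Crankshaft class):

    #include <cstdio>
    #include <vector>

    enum class Rep { kTagged, kInteger32, kDouble };

    // Models the input value plus the two fields the DataEquals() above
    // compares: the target representation and the deopt-on-undefined flag.
    struct ToyChange {
      int input_id;
      Rep to;
      bool deopt_on_undefined;

      bool Equals(const ToyChange& other) const {
        return input_id == other.input_id &&
               to == other.to &&
               deopt_on_undefined == other.deopt_on_undefined;
      }
    };

    int main() {
      std::vector<ToyChange> seen;
      ToyChange candidates[] = {
        {1, Rep::kInteger32, false},
        {1, Rep::kInteger32, false},  // duplicate: same input, target and flag
        {1, Rep::kDouble, false},     // different target representation
      };
      for (const ToyChange& c : candidates) {
        bool duplicate = false;
        for (const ToyChange& s : seen) {
          if (s.Equals(c)) { duplicate = true; break; }
        }
        if (duplicate) {
          std::printf("reuse existing conversion of value %d\n", c.input_id);
        } else {
          seen.push_back(c);
          std::printf("emit new conversion of value %d\n", c.input_id);
        }
      }
      return 0;
    }
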
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 6b43f53da7..1bc28ba82d 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -118,7 +118,7 @@ class LChunkBuilder;
V(InstanceOfKnownGlobal) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
+ V(IsNullAndBranch) \
V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -625,7 +625,7 @@ class HValue: public ZoneObject {
void ComputeInitialRange();
// Representation helpers.
- virtual Representation RequiredInputRepresentation(int index) = 0;
+ virtual Representation RequiredInputRepresentation(int index) const = 0;
virtual Representation InferredRepresentation() {
return representation();
@@ -841,7 +841,7 @@ class HTemplateControlInstruction: public HControlInstruction {
class HBlockEntry: public HTemplateInstruction<0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -854,7 +854,7 @@ class HBlockEntry: public HTemplateInstruction<0> {
// HSoftDeoptimize does not end a basic block as opposed to HDeoptimize.
class HSoftDeoptimize: public HTemplateInstruction<0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -866,7 +866,7 @@ class HDeoptimize: public HControlInstruction {
public:
explicit HDeoptimize(int environment_length) : values_(environment_length) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -908,10 +908,10 @@ class HDeoptimize: public HControlInstruction {
class HGoto: public HTemplateControlInstruction<1, 0> {
public:
explicit HGoto(HBasicBlock* target) {
- SetSuccessorAt(0, target);
- }
+ SetSuccessorAt(0, target);
+ }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -951,7 +951,7 @@ class HBranch: public HUnaryControlInstruction {
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -983,7 +983,7 @@ class HCompareMap: public HUnaryControlInstruction {
Handle<Map> map() const { return map_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1000,7 +1000,7 @@ class HReturn: public HTemplateControlInstruction<0, 1> {
SetOperandAt(0, value);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1014,7 +1014,7 @@ class HReturn: public HTemplateControlInstruction<0, 1> {
class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -1049,7 +1049,7 @@ class HThrow: public HTemplateInstruction<2> {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1064,7 +1064,7 @@ class HUseConst: public HUnaryOperation {
public:
explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -1083,7 +1083,7 @@ class HForceRepresentation: public HTemplateInstruction<1> {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return representation(); // Same as the output representation.
}
@@ -1094,27 +1094,27 @@ class HForceRepresentation: public HTemplateInstruction<1> {
class HChange: public HUnaryOperation {
public:
HChange(HValue* value,
+ Representation from,
Representation to,
bool is_truncating,
bool deoptimize_on_undefined)
- : HUnaryOperation(value) {
- ASSERT(!value->representation().IsNone() && !to.IsNone());
- ASSERT(!value->representation().Equals(to));
+ : HUnaryOperation(value),
+ from_(from),
+ deoptimize_on_undefined_(deoptimize_on_undefined) {
+ ASSERT(!from.IsNone() && !to.IsNone());
+ ASSERT(!from.Equals(to));
set_representation(to);
SetFlag(kUseGVN);
- if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
if (is_truncating) SetFlag(kTruncatingToInt32);
}
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- Representation from() { return value()->representation(); }
- Representation to() { return representation(); }
- bool deoptimize_on_undefined() const {
- return CheckFlag(kDeoptimizeOnUndefined);
- }
- virtual Representation RequiredInputRepresentation(int index) {
- return from();
+ Representation from() const { return from_; }
+ Representation to() const { return representation(); }
+ bool deoptimize_on_undefined() const { return deoptimize_on_undefined_; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return from_;
}
virtual Range* InferRange();
@@ -1124,7 +1124,16 @@ class HChange: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Change)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) {
+ if (!other->IsChange()) return false;
+ HChange* change = HChange::cast(other);
+ return to().Equals(change->to())
+ && deoptimize_on_undefined() == change->deoptimize_on_undefined();
+ }
+
+ private:
+ Representation from_;
+ bool deoptimize_on_undefined_;
};
@@ -1136,7 +1145,7 @@ class HClampToUint8: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -1155,7 +1164,7 @@ class HToInt32: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -1214,7 +1223,7 @@ class HSimulate: public HInstruction {
virtual int OperandCount() { return values_.length(); }
virtual HValue* OperandAt(int index) { return values_[index]; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -1259,7 +1268,7 @@ class HStackCheck: public HTemplateInstruction<1> {
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1297,7 +1306,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
FunctionLiteral* function() const { return function_; }
CallKind call_kind() const { return call_kind_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -1314,7 +1323,7 @@ class HLeaveInlined: public HTemplateInstruction<0> {
public:
HLeaveInlined() {}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -1328,7 +1337,7 @@ class HPushArgument: public HUnaryOperation {
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1345,7 +1354,7 @@ class HThisFunction: public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -1363,7 +1372,7 @@ class HContext: public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -1383,7 +1392,7 @@ class HOuterContext: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(OuterContext);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1401,7 +1410,7 @@ class HGlobalObject: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1420,7 +1429,7 @@ class HGlobalReceiver: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1456,7 +1465,7 @@ class HUnaryCall: public HCall<1> {
SetOperandAt(0, value);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1476,7 +1485,7 @@ class HBinaryCall: public HCall<2> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1491,7 +1500,7 @@ class HInvokeFunction: public HBinaryCall {
: HBinaryCall(context, function, argument_count) {
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1516,7 +1525,7 @@ class HCallConstantFunction: public HCall<0> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -1533,7 +1542,7 @@ class HCallKeyed: public HBinaryCall {
: HBinaryCall(context, key, argument_count) {
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1557,7 +1566,7 @@ class HCallNamed: public HUnaryCall {
DECLARE_CONCRETE_INSTRUCTION(CallNamed)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1574,7 +1583,7 @@ class HCallFunction: public HUnaryCall {
HValue* context() { return value(); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1593,7 +1602,7 @@ class HCallGlobal: public HUnaryCall {
HValue* context() { return value(); }
Handle<String> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1613,7 +1622,7 @@ class HCallKnownGlobal: public HCall<0> {
Handle<JSFunction> target() const { return target_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -1630,7 +1639,7 @@ class HCallNew: public HBinaryCall {
: HBinaryCall(context, constructor, argument_count) {
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1657,7 +1666,7 @@ class HCallRuntime: public HCall<1> {
const Runtime::Function* function() const { return c_function_; }
Handle<String> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1683,7 +1692,7 @@ class HJSArrayLength: public HTemplateInstruction<2> {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1707,7 +1716,7 @@ class HFixedArrayBaseLength: public HUnaryOperation {
SetFlag(kDependsOnArrayLengths);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1726,7 +1735,7 @@ class HElementsKind: public HUnaryOperation {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1745,7 +1754,7 @@ class HBitNot: public HUnaryOperation {
SetFlag(kTruncatingToInt32);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Integer32();
}
virtual HType CalculateInferredType();
@@ -1795,7 +1804,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
if (index == 0) {
return Representation::Tagged();
} else {
@@ -1852,7 +1861,7 @@ class HLoadElements: public HUnaryOperation {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1875,7 +1884,7 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1899,7 +1908,7 @@ class HCheckMap: public HTemplateInstruction<2> {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
@@ -1929,7 +1938,7 @@ class HCheckFunction: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
@@ -1969,9 +1978,7 @@ class HCheckInstanceType: public HUnaryOperation {
return new HCheckInstanceType(value, IS_SYMBOL);
}
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2001,8 +2008,6 @@ class HCheckInstanceType: public HUnaryOperation {
LAST_INTERVAL_CHECK = IS_JS_ARRAY
};
- const char* GetCheckName();
-
HCheckInstanceType(HValue* value, Check check)
: HUnaryOperation(value), check_(check) {
set_representation(Representation::Tagged());
@@ -2020,7 +2025,7 @@ class HCheckNonSmi: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2066,7 +2071,7 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -2097,7 +2102,7 @@ class HCheckSmi: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual HType CalculateInferredType();
@@ -2146,7 +2151,7 @@ class HPhi: public HValue {
}
virtual Range* InferRange();
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return representation();
}
virtual HType CalculateInferredType();
@@ -2238,7 +2243,7 @@ class HArgumentsObject: public HTemplateInstruction<0> {
SetFlag(kIsArguments);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -2254,20 +2259,7 @@ class HConstant: public HTemplateInstruction<0> {
bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
- bool ImmortalImmovable() const {
- Heap* heap = HEAP;
- if (*handle_ == heap->undefined_value()) return true;
- if (*handle_ == heap->null_value()) return true;
- if (*handle_ == heap->true_value()) return true;
- if (*handle_ == heap->false_value()) return true;
- if (*handle_ == heap->the_hole_value()) return true;
- if (*handle_ == heap->minus_zero_value()) return true;
- if (*handle_ == heap->nan_value()) return true;
- if (*handle_ == heap->empty_string()) return true;
- return false;
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -2375,7 +2367,7 @@ class HApplyArguments: public HTemplateInstruction<4> {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
// The length is untagged, all other inputs are tagged.
return (index == 2)
? Representation::Integer32()
@@ -2402,7 +2394,7 @@ class HArgumentsElements: public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -2418,7 +2410,7 @@ class HArgumentsLength: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2441,7 +2433,7 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
// The arguments elements is considered tagged.
return index == 0
? Representation::Tagged()
@@ -2467,7 +2459,7 @@ class HBoundsCheck: public HTemplateInstruction<2> {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Integer32();
}
@@ -2492,7 +2484,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return index == 0
? Representation::Tagged()
: representation();
@@ -2530,7 +2522,7 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
}
virtual HType CalculateInferredType();
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return index == 0
? Representation::Tagged()
: representation();
@@ -2557,7 +2549,7 @@ class HCompareGeneric: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2595,7 +2587,7 @@ class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
return input_representation_;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return input_representation_;
}
virtual void PrintDataTo(StringStream* stream);
@@ -2618,9 +2610,7 @@ class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2639,7 +2629,7 @@ class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
HValue* left() { return value(); }
int right() const { return right_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Integer32();
}
@@ -2651,25 +2641,21 @@ class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
};
-class HIsNilAndBranch: public HUnaryControlInstruction {
+class HIsNullAndBranch: public HUnaryControlInstruction {
public:
- HIsNilAndBranch(HValue* value, EqualityKind kind, NilValue nil)
- : HUnaryControlInstruction(value, NULL, NULL), kind_(kind), nil_(nil) { }
-
- EqualityKind kind() const { return kind_; }
- NilValue nil() const { return nil_; }
+ HIsNullAndBranch(HValue* value, bool is_strict)
+ : HUnaryControlInstruction(value, NULL, NULL), is_strict_(is_strict) { }
- virtual void PrintDataTo(StringStream* stream);
+ bool is_strict() const { return is_strict_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch)
private:
- EqualityKind kind_;
- NilValue nil_;
+ bool is_strict_;
};
@@ -2678,7 +2664,7 @@ class HIsObjectAndBranch: public HUnaryControlInstruction {
explicit HIsObjectAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2693,7 +2679,7 @@ class HIsSmiAndBranch: public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2707,7 +2693,7 @@ class HIsUndetectableAndBranch: public HUnaryControlInstruction {
explicit HIsUndetectableAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2717,7 +2703,7 @@ class HIsUndetectableAndBranch: public HUnaryControlInstruction {
class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -2739,7 +2725,7 @@ class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2756,7 +2742,7 @@ class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
explicit HHasCachedArrayIndexAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2771,7 +2757,7 @@ class HGetCachedArrayIndex: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2790,7 +2776,7 @@ class HClassOfTestAndBranch: public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2814,7 +2800,7 @@ class HTypeofIsAndBranch: public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2831,7 +2817,7 @@ class HInstanceOf: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2859,7 +2845,7 @@ class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
HValue* left() { return OperandAt(1); }
Handle<JSFunction> function() { return function_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2884,7 +2870,7 @@ class HPower: public HTemplateInstruction<2> {
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return index == 0
? Representation::Double()
: Representation::None();
@@ -3113,7 +3099,7 @@ class HOsrEntry: public HTemplateInstruction<0> {
int ast_id() const { return ast_id_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -3134,7 +3120,7 @@ class HParameter: public HTemplateInstruction<0> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -3166,7 +3152,7 @@ class HCallStub: public HUnaryCall {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3182,7 +3168,7 @@ class HUnknownOSRValue: public HTemplateInstruction<0> {
public:
HUnknownOSRValue() { set_representation(Representation::Tagged()); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -3192,15 +3178,15 @@ class HUnknownOSRValue: public HTemplateInstruction<0> {
class HLoadGlobalCell: public HTemplateInstruction<0> {
public:
- HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details)
- : cell_(cell), details_(details) {
+ HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
+ : cell_(cell), check_hole_value_(check_hole_value) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnGlobalVars);
}
Handle<JSGlobalPropertyCell> cell() const { return cell_; }
- bool RequiresHoleCheck();
+ bool check_hole_value() const { return check_hole_value_; }
virtual void PrintDataTo(StringStream* stream);
@@ -3209,7 +3195,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
return reinterpret_cast<intptr_t>(*cell_);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -3223,7 +3209,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
private:
Handle<JSGlobalPropertyCell> cell_;
- PropertyDetails details_;
+ bool check_hole_value_;
};
@@ -3248,7 +3234,7 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3264,19 +3250,17 @@ class HStoreGlobalCell: public HUnaryOperation {
public:
HStoreGlobalCell(HValue* value,
Handle<JSGlobalPropertyCell> cell,
- PropertyDetails details)
+ bool check_hole_value)
: HUnaryOperation(value),
cell_(cell),
- details_(details) {
+ check_hole_value_(check_hole_value) {
SetFlag(kChangesGlobalVars);
}
Handle<JSGlobalPropertyCell> cell() const { return cell_; }
- bool RequiresHoleCheck() {
- return !details_.IsDontDelete() || details_.IsReadOnly();
- }
+ bool check_hole_value() const { return check_hole_value_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
@@ -3285,7 +3269,7 @@ class HStoreGlobalCell: public HUnaryOperation {
private:
Handle<JSGlobalPropertyCell> cell_;
- PropertyDetails details_;
+ bool check_hole_value_;
};
@@ -3313,7 +3297,7 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3336,7 +3320,7 @@ class HLoadContextSlot: public HUnaryOperation {
int slot_index() const { return slot_index_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3358,7 +3342,7 @@ class HLoadContextSlot: public HUnaryOperation {
static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
return !value->type().IsBoolean()
&& !value->type().IsSmi()
- && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
+ && !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
}
@@ -3379,7 +3363,7 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
return StoringValueNeedsWriteBarrier(value());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3412,7 +3396,7 @@ class HLoadNamedField: public HUnaryOperation {
bool is_in_object() const { return is_in_object_; }
int offset() const { return offset_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
@@ -3444,7 +3428,7 @@ class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
Handle<String> name() { return name_; }
bool need_generic() { return need_generic_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3479,7 +3463,7 @@ class HLoadNamedGeneric: public HTemplateInstruction<2> {
HValue* object() { return OperandAt(1); }
Handle<Object> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3503,7 +3487,7 @@ class HLoadFunctionPrototype: public HUnaryOperation {
HValue* function() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3527,7 +3511,7 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32.
return index == 0
? Representation::Tagged()
@@ -3536,7 +3520,7 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream);
- bool RequiresHoleCheck();
+ bool RequiresHoleCheck() const;
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
@@ -3558,7 +3542,7 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32.
return index == 0
? Representation::Tagged()
@@ -3567,6 +3551,8 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream);
+ bool RequiresHoleCheck() const;
+
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
protected:
@@ -3596,7 +3582,7 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32, but the base pointer
// for the element load is a naked pointer.
return index == 0
@@ -3639,7 +3625,7 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3668,7 +3654,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream);
@@ -3717,7 +3703,7 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3731,16 +3717,14 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
class HStoreKeyedFastElement: public HTemplateInstruction<3> {
public:
- HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
- ElementsKind elements_kind = FAST_ELEMENTS)
- : elements_kind_(elements_kind) {
+ HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
SetFlag(kChangesArrayElements);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32.
return index == 1
? Representation::Integer32()
@@ -3750,28 +3734,14 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> {
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
- bool value_is_smi() {
- return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
- }
bool NeedsWriteBarrier() {
- if (value_is_smi()) {
- return false;
- } else {
- return StoringValueNeedsWriteBarrier(value());
- }
- }
-
- bool ValueNeedsSmiCheck() {
- return value_is_smi();
+ return StoringValueNeedsWriteBarrier(value());
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
-
- private:
- ElementsKind elements_kind_;
};
@@ -3786,7 +3756,7 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
SetFlag(kChangesDoubleArrayElements);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
if (index == 1) {
return Representation::Integer32();
} else if (index == 2) {
@@ -3825,7 +3795,7 @@ class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
if (index == 0) {
return Representation::External();
} else {
@@ -3873,7 +3843,7 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
HValue* context() { return OperandAt(3); }
bool strict_mode() { return strict_mode_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3895,7 +3865,7 @@ class HStringAdd: public HBinaryOperation {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3921,7 +3891,7 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
// The index is supposed to be Integer32.
return index == 2
? Representation::Integer32()
@@ -3952,7 +3922,7 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return index == 0
? Representation::Tagged()
: Representation::Integer32();
@@ -3975,7 +3945,7 @@ class HStringLength: public HUnaryOperation {
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -4031,7 +4001,7 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
bool IsCopyOnWrite() const;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -4065,7 +4035,7 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
bool fast_elements() const { return fast_elements_; }
bool has_function() const { return has_function_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -4094,7 +4064,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
Handle<String> pattern() { return pattern_; }
Handle<String> flags() { return flags_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -4118,7 +4088,7 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -4144,9 +4114,7 @@ class HTypeof: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -4164,7 +4132,7 @@ class HToFastProperties: public HUnaryOperation {
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -4178,7 +4146,7 @@ class HValueOf: public HUnaryOperation {
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -4194,7 +4162,7 @@ class HDeleteProperty: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -4221,7 +4189,7 @@ class HIn: public HTemplateInstruction<3> {
HValue* key() { return OperandAt(1); }
HValue* object() { return OperandAt(2); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 2d471cc294..c625fba8db 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -422,7 +422,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
};
-void HGraph::Verify(bool do_full_verify) const {
+void HGraph::Verify() const {
for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i);
@@ -473,27 +473,25 @@ void HGraph::Verify(bool do_full_verify) const {
// Check special property of first block to have no predecessors.
ASSERT(blocks_.at(0)->predecessors()->is_empty());
- if (do_full_verify) {
- // Check that the graph is fully connected.
- ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
- ASSERT(analyzer.visited_count() == blocks_.length());
+ // Check that the graph is fully connected.
+ ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
+ ASSERT(analyzer.visited_count() == blocks_.length());
- // Check that entry block dominator is NULL.
- ASSERT(entry_block_->dominator() == NULL);
+ // Check that entry block dominator is NULL.
+ ASSERT(entry_block_->dominator() == NULL);
- // Check dominators.
- for (int i = 0; i < blocks_.length(); ++i) {
- HBasicBlock* block = blocks_.at(i);
- if (block->dominator() == NULL) {
- // Only start block may have no dominator assigned to.
- ASSERT(i == 0);
- } else {
- // Assert that block is unreachable if dominator must not be visited.
- ReachabilityAnalyzer dominator_analyzer(entry_block_,
- blocks_.length(),
- block->dominator());
- ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
- }
+ // Check dominators.
+ for (int i = 0; i < blocks_.length(); ++i) {
+ HBasicBlock* block = blocks_.at(i);
+ if (block->dominator() == NULL) {
+ // Only start block may have no dominator assigned to.
+ ASSERT(i == 0);
+ } else {
+ // Assert that block is unreachable if dominator must not be visited.
+ ReachabilityAnalyzer dominator_analyzer(entry_block_,
+ blocks_.length(),
+ block->dominator());
+ ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
}
}
}
@@ -852,7 +850,7 @@ void HGraph::EliminateUnreachablePhis() {
}
-bool HGraph::CheckArgumentsPhiUses() {
+bool HGraph::CheckPhis() {
int block_count = blocks_.length();
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
@@ -865,11 +863,13 @@ bool HGraph::CheckArgumentsPhiUses() {
}
-bool HGraph::CheckConstPhiUses() {
+bool HGraph::CollectPhis() {
int block_count = blocks_.length();
+ phi_list_ = new ZoneList<HPhi*>(block_count);
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
+ phi_list_->Add(phi);
// Check for the hole value (from an uninitialized const).
for (int k = 0; k < phi->OperandCount(); k++) {
if (phi->OperandAt(k) == GetConstantHole()) return false;
@@ -880,18 +880,6 @@ bool HGraph::CheckConstPhiUses() {
}
-void HGraph::CollectPhis() {
- int block_count = blocks_.length();
- phi_list_ = new ZoneList<HPhi*>(block_count);
- for (int i = 0; i < block_count; ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list_->Add(phi);
- }
- }
-}
-
-
void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
BitVector in_worklist(GetMaximumValueID());
for (int i = 0; i < worklist->length(); ++i) {
@@ -1499,6 +1487,9 @@ int HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
block->block_id() < dominated->block_id() &&
visited_on_paths_.Add(block->block_id())) {
side_effects |= block_side_effects_[block->block_id()];
+ if (block->IsLoopHeader()) {
+ side_effects |= loop_side_effects_[block->block_id()];
+ }
side_effects |= CollectSideEffectsOnPathsToDominatedBlock(
dominator, block);
}
@@ -1860,7 +1851,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
}
if (new_value == NULL) {
- new_value = new(zone()) HChange(value, to,
+ new_value = new(zone()) HChange(value, value->representation(), to,
is_truncating, deoptimize_on_undefined);
}
@@ -2311,7 +2302,7 @@ HGraph* HGraphBuilder::CreateGraph() {
// Handle implicit declaration of the function name in named function
// expressions before other declarations.
if (scope->is_function_scope() && scope->function() != NULL) {
- HandleDeclaration(scope->function(), CONST, NULL);
+ HandleDeclaration(scope->function(), Variable::CONST, NULL);
}
VisitDeclarations(scope->declarations());
AddSimulate(AstNode::kDeclarationsId);
@@ -2332,24 +2323,17 @@ HGraph* HGraphBuilder::CreateGraph() {
graph()->OrderBlocks();
graph()->AssignDominators();
-
-#ifdef DEBUG
- // Do a full verify after building the graph and computing dominators.
- graph()->Verify(true);
-#endif
-
graph()->PropagateDeoptimizingMark();
- if (!graph()->CheckConstPhiUses()) {
- Bailout("Unsupported phi use of const variable");
- return NULL;
- }
graph()->EliminateRedundantPhis();
- if (!graph()->CheckArgumentsPhiUses()) {
- Bailout("Unsupported phi use of arguments");
+ if (!graph()->CheckPhis()) {
+ Bailout("Unsupported phi use of arguments object");
return NULL;
}
if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
- graph()->CollectPhis();
+ if (!graph()->CollectPhis()) {
+ Bailout("Unsupported phi use of uninitialized constant");
+ return NULL;
+ }
HInferRepresentation rep(graph());
rep.Analyze();
@@ -3141,21 +3125,11 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Variable* variable = expr->var();
- if (variable->mode() == LET) {
+ if (variable->mode() == Variable::LET) {
return Bailout("reference to let variable");
}
switch (variable->location()) {
case Variable::UNALLOCATED: {
- // Handle known global constants like 'undefined' specially to avoid a
- // load from a global cell for them.
- Handle<Object> constant_value =
- isolate()->factory()->GlobalConstantFor(variable->name());
- if (!constant_value.is_null()) {
- HConstant* instr =
- new(zone()) HConstant(constant_value, Representation::Tagged());
- return ast_context()->ReturnInstruction(instr, expr->id());
- }
-
LookupResult lookup;
GlobalPropertyAccess type =
LookupGlobalProperty(variable, &lookup, false);
@@ -3168,8 +3142,8 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
if (type == kUseCell) {
Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- HLoadGlobalCell* instr =
- new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
+ bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+ HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
return ast_context()->ReturnInstruction(instr, expr->id());
} else {
HValue* context = environment()->LookupContext();
@@ -3188,7 +3162,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case Variable::PARAMETER:
case Variable::LOCAL: {
HValue* value = environment()->Lookup(variable);
- if (variable->mode() == CONST &&
+ if (variable->mode() == Variable::CONST &&
value == graph()->GetConstantHole()) {
return Bailout("reference to uninitialized const variable");
}
@@ -3196,7 +3170,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
case Variable::CONTEXT: {
- if (variable->mode() == CONST) {
+ if (variable->mode() == Variable::CONST) {
return Bailout("reference to const context slot");
}
HValue* context = BuildContextChainWalk(variable);
@@ -3346,43 +3320,7 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
Representation::Integer32()));
- HInstruction* elements_kind =
- AddInstruction(new(zone()) HElementsKind(literal));
- HBasicBlock* store_fast = graph()->CreateBasicBlock();
- // Two empty blocks to satisfy edge split form.
- HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock();
- HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock();
- HBasicBlock* store_generic = graph()->CreateBasicBlock();
- HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock();
- HBasicBlock* join = graph()->CreateBasicBlock();
-
- HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value);
- smicheck->SetSuccessorAt(0, store_fast_edgesplit1);
- smicheck->SetSuccessorAt(1, check_smi_only_elements);
- current_block()->Finish(smicheck);
- store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast));
-
- set_current_block(check_smi_only_elements);
- HCompareConstantEqAndBranch* smi_elements_check =
- new(zone()) HCompareConstantEqAndBranch(elements_kind,
- FAST_SMI_ONLY_ELEMENTS,
- Token::EQ_STRICT);
- smi_elements_check->SetSuccessorAt(0, store_generic);
- smi_elements_check->SetSuccessorAt(1, store_fast_edgesplit2);
- current_block()->Finish(smi_elements_check);
- store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
-
- set_current_block(store_fast);
AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
- store_fast->Goto(join);
-
- set_current_block(store_generic);
- AddInstruction(BuildStoreKeyedGeneric(literal, key, value));
- store_generic->Goto(join);
-
- join->SetJoinId(expr->id());
- set_current_block(join);
-
AddSimulate(expr->GetIdForElement(i));
}
return ast_context()->ReturnValue(Pop());
@@ -3626,10 +3564,10 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
LookupResult lookup;
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
+ bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
- HInstruction* instr =
- new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
+ HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole);
instr->set_position(position);
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(ast_id);
@@ -3663,7 +3601,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->mode() == CONST || var->mode() == LET) {
+ if (var->mode() == Variable::CONST || var->mode() == Variable::LET) {
return Bailout("unsupported let or const compound assignment");
}
@@ -3808,7 +3746,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
HandlePropertyAssignment(expr);
} else if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->mode() == CONST) {
+ if (var->mode() == Variable::CONST) {
if (expr->op() != Token::INIT_CONST) {
return Bailout("non-initializer assignment to const");
}
@@ -3819,7 +3757,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
// variables (e.g. initialization inside a loop).
HValue* old_value = environment()->Lookup(var);
AddInstruction(new HUseConst(old_value));
- } else if (var->mode() == LET) {
+ } else if (var->mode() == Variable::LET) {
return Bailout("unsupported assignment to let");
}
@@ -3847,7 +3785,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
}
case Variable::CONTEXT: {
- ASSERT(var->mode() != CONST);
+ ASSERT(var->mode() != Variable::CONST);
// Bail out if we try to mutate a parameter value in a function using
// the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
@@ -3993,7 +3931,6 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
break;
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@@ -4010,30 +3947,6 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
}
-HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
- HValue* checked_key,
- HValue* val,
- ElementsKind elements_kind,
- bool is_store) {
- if (is_store) {
- ASSERT(val != NULL);
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- return new(zone()) HStoreKeyedFastDoubleElement(
- elements, checked_key, val);
- } else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
- return new(zone()) HStoreKeyedFastElement(
- elements, checked_key, val, elements_kind);
- }
- }
- // It's an element load (!is_store).
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
- } else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
- return new(zone()) HLoadKeyedFastElement(elements, checked_key);
- }
-}
-
-
HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
@@ -4041,20 +3954,17 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
bool is_store) {
ASSERT(expr->IsMonomorphic());
Handle<Map> map = expr->GetMonomorphicReceiverType();
- AddInstruction(new(zone()) HCheckNonSmi(object));
- HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
- bool fast_smi_only_elements = map->has_fast_smi_only_elements();
- bool fast_elements = map->has_fast_elements();
- bool fast_double_elements = map->has_fast_double_elements();
- if (!fast_smi_only_elements &&
- !fast_elements &&
- !fast_double_elements &&
+ if (!map->has_fast_elements() &&
+ !map->has_fast_double_elements() &&
!map->has_external_array_elements()) {
return is_store ? BuildStoreKeyedGeneric(object, key, val)
: BuildLoadKeyedGeneric(object, key);
}
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+ HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
- if (is_store && (fast_elements || fast_smi_only_elements)) {
+ bool fast_double_elements = map->has_fast_double_elements();
+ if (is_store && map->has_fast_elements()) {
AddInstruction(new(zone()) HCheckMap(
elements, isolate()->factory()->fixed_array_map()));
}
@@ -4069,15 +3979,28 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
return BuildExternalArrayElementAccess(external_elements, checked_key,
val, map->elements_kind(), is_store);
}
- ASSERT(fast_smi_only_elements || fast_elements || fast_double_elements);
+ ASSERT(map->has_fast_elements() || fast_double_elements);
if (map->instance_type() == JS_ARRAY_TYPE) {
length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
} else {
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
}
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- return BuildFastElementAccess(elements, checked_key, val,
- map->elements_kind(), is_store);
+ if (is_store) {
+ if (fast_double_elements) {
+ return new(zone()) HStoreKeyedFastDoubleElement(elements,
+ checked_key,
+ val);
+ } else {
+ return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
+ }
+ } else {
+ if (fast_double_elements) {
+ return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
+ } else {
+ return new(zone()) HLoadKeyedFastElement(elements, checked_key);
+ }
+ }
}
@@ -4119,20 +4042,14 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
- // Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
- // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
- // arrays.
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ // FAST_ELEMENTS is assumed to be the first case.
+ STATIC_ASSERT(FAST_ELEMENTS == 0);
- for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
+ for (ElementsKind elements_kind = FAST_ELEMENTS;
elements_kind <= LAST_ELEMENTS_KIND;
elements_kind = ElementsKind(elements_kind + 1)) {
- // After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
- // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
- // that's executed for all external array cases.
+ // After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
+ // need to add some code that's executed for all external array cases.
STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
LAST_ELEMENTS_KIND);
if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@@ -4154,25 +4071,15 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_true);
HInstruction* access;
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
- elements_kind == FAST_ELEMENTS ||
+ if (elements_kind == FAST_ELEMENTS ||
elements_kind == FAST_DOUBLE_ELEMENTS) {
- if (is_store && elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- AddInstruction(new(zone()) HCheckSmi(val));
- }
- if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
+ bool fast_double_elements =
+ elements_kind == FAST_DOUBLE_ELEMENTS;
+ if (is_store && elements_kind == FAST_ELEMENTS) {
AddInstruction(new(zone()) HCheckMap(
elements, isolate()->factory()->fixed_array_map(),
elements_kind_branch));
}
- // TODO(jkummerow): The need for these two blocks could be avoided
- // in one of two ways:
- // (1) Introduce ElementsKinds for JSArrays that are distinct from
- // those for fast objects.
- // (2) Put the common instructions into a third "join" block. This
- // requires additional AST IDs that we can deopt to from inside
- // that join block. They must be added to the Property class (when
- // it's a keyed property) and registered in the full codegen.
HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
HHasInstanceTypeAndBranch* typecheck =
@@ -4182,15 +4089,29 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
current_block()->Finish(typecheck);
set_current_block(if_jsarray);
- HInstruction* length;
- length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck));
+ HInstruction* length = new(zone()) HJSArrayLength(object, typecheck);
+ AddInstruction(length);
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind, is_store));
- if (!is_store) {
+ if (is_store) {
+ if (fast_double_elements) {
+ access = AddInstruction(
+ new(zone()) HStoreKeyedFastDoubleElement(elements,
+ checked_key,
+ val));
+ } else {
+ access = AddInstruction(
+ new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
+ }
+ } else {
+ if (fast_double_elements) {
+ access = AddInstruction(
+ new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
+ } else {
+ access = AddInstruction(
+ new(zone()) HLoadKeyedFastElement(elements, checked_key));
+ }
Push(access);
}
-
*has_side_effects |= access->HasSideEffects();
if (position != -1) {
access->set_position(position);
@@ -4200,8 +4121,25 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_fastobject);
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
- access = AddInstruction(BuildFastElementAccess(
- elements, checked_key, val, elements_kind, is_store));
+ if (is_store) {
+ if (fast_double_elements) {
+ access = AddInstruction(
+ new(zone()) HStoreKeyedFastDoubleElement(elements,
+ checked_key,
+ val));
+ } else {
+ access = AddInstruction(
+ new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
+ }
+ } else {
+ if (fast_double_elements) {
+ access = AddInstruction(
+ new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
+ } else {
+ access = AddInstruction(
+ new(zone()) HLoadKeyedFastElement(elements, checked_key));
+ }
+ }
} else if (elements_kind == DICTIONARY_ELEMENTS) {
if (is_store) {
access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@@ -4539,25 +4477,20 @@ bool HGraphBuilder::TryInline(Call* expr) {
return false;
}
+ // No context change required.
CompilationInfo* outer_info = info();
-#if !defined(V8_TARGET_ARCH_IA32)
- // Target must be able to use caller's context.
if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() ||
outer_info->scope()->num_heap_slots() > 0) {
TraceInline(target, caller, "target requires context change");
return false;
}
-#endif
-
// Don't inline deeper than kMaxInliningLevels calls.
HEnvironment* env = environment();
int current_level = 1;
while (env->outer() != NULL) {
- if (current_level == (FLAG_limit_inlining
- ? Compiler::kMaxInliningLevels
- : 2 * Compiler::kMaxInliningLevels)) {
+ if (current_level == Compiler::kMaxInliningLevels) {
TraceInline(target, caller, "inline depth limit reached");
return false;
}
@@ -4663,8 +4596,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
ASSERT(target_shared->has_deoptimization_support());
TypeFeedbackOracle target_oracle(
Handle<Code>(target_shared->code()),
- Handle<Context>(target->context()->global_context()),
- isolate());
+ Handle<Context>(target->context()->global_context()));
FunctionState target_state(this, &target_info, &target_oracle);
HConstant* undefined = graph()->GetConstantUndefined();
@@ -4673,17 +4605,6 @@ bool HGraphBuilder::TryInline(Call* expr) {
function,
undefined,
call_kind);
-#ifdef V8_TARGET_ARCH_IA32
- // IA32 only, overwrite the caller's context in the deoptimization
- // environment with the correct one.
- //
- // TODO(kmillikin): implement the same inlining on other platforms so we
- // can remove the unsightly ifdefs in this function.
- HConstant* context = new HConstant(Handle<Context>(target->context()),
- Representation::Tagged());
- AddInstruction(context);
- inner_env->BindContext(context);
-#endif
HBasicBlock* body_entry = CreateBasicBlock(inner_env);
current_block()->Goto(body_entry);
body_entry->SetJoinId(expr->ReturnId());
@@ -5004,8 +4925,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
}
} else {
- expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ // FIXME.
bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
if (global_call) {
@@ -5057,46 +4978,6 @@ void HGraphBuilder::VisitCall(Call* expr) {
Drop(argument_count);
}
- } else if (expr->IsMonomorphic()) {
- // The function is on the stack in the unoptimized code during
- // evaluation of the arguments.
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- HValue* context = environment()->LookupContext();
- HGlobalObject* global = new(zone()) HGlobalObject(context);
- HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
- AddInstruction(global);
- PushAndAdd(receiver);
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
- AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
- if (TryInline(expr)) {
- // The function is lingering in the deoptimization environment.
- // Handle it by case analysis on the AST context.
- if (ast_context()->IsEffect()) {
- Drop(1);
- } else if (ast_context()->IsValue()) {
- HValue* result = Pop();
- Drop(1);
- Push(result);
- } else if (ast_context()->IsTest()) {
- TestContext* context = TestContext::cast(ast_context());
- if (context->if_true()->HasPredecessor()) {
- context->if_true()->last_environment()->Drop(1);
- }
- if (context->if_false()->HasPredecessor()) {
- context->if_true()->last_environment()->Drop(1);
- }
- } else {
- UNREACHABLE();
- }
- return;
- } else {
- call = PreProcessCall(new(zone()) HInvokeFunction(context,
- function,
- argument_count));
- Drop(1); // The function.
- }
-
} else {
CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* context = environment()->LookupContext();
@@ -5403,7 +5284,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->mode() == CONST) {
+ if (var->mode() == Variable::CONST) {
return Bailout("unsupported count operation with const");
}
// Argument of the count operation is a variable, not a property.
@@ -5790,36 +5671,26 @@ Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
}
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
- Expression* sub_expr,
+void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
+ Expression* expr,
Handle<String> check) {
- CHECK_ALIVE(VisitForTypeOf(sub_expr));
- HValue* value = Pop();
- HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
- instr->set_position(expr->position());
- return ast_context()->ReturnControl(instr, expr->id());
+ CHECK_ALIVE(VisitForTypeOf(expr));
+ HValue* expr_value = Pop();
+ HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
+ instr->set_position(compare_expr->position());
+ return ast_context()->ReturnControl(instr, compare_expr->id());
}
-bool HGraphBuilder::TryLiteralCompare(CompareOperation* expr) {
- Expression *sub_expr;
- Handle<String> check;
- if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
- HandleLiteralCompareTypeof(expr, sub_expr, check);
- return true;
- }
-
- if (expr->IsLiteralCompareUndefined(&sub_expr)) {
- HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
- return true;
- }
-
- if (expr->IsLiteralCompareNull(&sub_expr)) {
- HandleLiteralCompareNil(expr, sub_expr, kNullValue);
- return true;
- }
-
- return false;
+void HGraphBuilder::HandleLiteralCompareUndefined(
+ CompareOperation* compare_expr, Expression* expr) {
+ CHECK_ALIVE(VisitForValue(expr));
+ HValue* lhs = Pop();
+ HValue* rhs = graph()->GetConstantUndefined();
+ HCompareObjectEqAndBranch* instr =
+ new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
+ instr->set_position(compare_expr->position());
+ return ast_context()->ReturnControl(instr, compare_expr->id());
}
@@ -5841,7 +5712,17 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
// Check for special cases that compare against literals.
- if (TryLiteralCompare(expr)) return;
+ Expression *sub_expr;
+ Handle<String> check;
+ if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+ HandleLiteralCompareTypeof(expr, sub_expr, check);
+ return;
+ }
+
+ if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+ HandleLiteralCompareUndefined(expr, sub_expr);
+ return;
+ }
TypeInfo type_info = oracle()->CompareType(expr);
// Check if this expression was ever executed according to type feedback.
@@ -5946,18 +5827,14 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
-void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
+void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- CHECK_ALIVE(VisitForValue(sub_expr));
+ CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
- EqualityKind kind =
- expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
- HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
- instr->set_position(expr->position());
+ HIsNullAndBranch* instr =
+ new(zone()) HIsNullAndBranch(value, expr->is_strict());
return ast_context()->ReturnControl(instr, expr->id());
}
@@ -5977,9 +5854,9 @@ void HGraphBuilder::VisitDeclaration(Declaration* decl) {
void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
- VariableMode mode,
+ Variable::Mode mode,
FunctionLiteral* function) {
- if (mode == LET) return Bailout("unsupported let declaration");
+ if (mode == Variable::LET) return Bailout("unsupported let declaration");
Variable* var = proxy->var();
switch (var->location()) {
case Variable::UNALLOCATED:
@@ -5987,9 +5864,9 @@ void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT:
- if (mode == CONST || function != NULL) {
+ if (mode == Variable::CONST || function != NULL) {
HValue* value = NULL;
- if (mode == CONST) {
+ if (mode == Variable::CONST) {
value = graph()->GetConstantHole();
} else {
VisitForValue(function);
@@ -6040,7 +5917,9 @@ void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
+ new(zone()) HHasInstanceTypeAndBranch(value,
+ JS_FUNCTION_TYPE,
+ JS_FUNCTION_PROXY_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -6940,7 +6819,7 @@ void HPhase::End() const {
}
#ifdef DEBUG
- if (graph_ != NULL) graph_->Verify(false); // No full verify.
+ if (graph_ != NULL) graph_->Verify();
if (allocator_ != NULL) allocator_->Verify();
#endif
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index b66042c2cb..03fbc73220 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -243,13 +243,11 @@ class HGraph: public ZoneObject {
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
- bool CheckArgumentsPhiUses();
+ bool CheckPhis();
- // Returns false if there are phi-uses of an uninitialized const
- // which are not supported by the optimizing compiler.
- bool CheckConstPhiUses();
-
- void CollectPhis();
+  // Returns false if there are phi-uses of hole values coming
+ // from uninitialized consts.
+ bool CollectPhis();
Handle<Code> Compile(CompilationInfo* info);
@@ -285,7 +283,7 @@ class HGraph: public ZoneObject {
}
#ifdef DEBUG
- void Verify(bool do_full_verify) const;
+ void Verify() const;
#endif
private:
@@ -782,7 +780,7 @@ class HGraphBuilder: public AstVisitor {
#undef INLINE_FUNCTION_GENERATOR_DECLARATION
void HandleDeclaration(VariableProxy* proxy,
- VariableMode mode,
+ Variable::Mode mode,
FunctionLiteral* function);
void VisitDelete(UnaryOperation* expr);
@@ -912,13 +910,11 @@ class HGraphBuilder: public AstVisitor {
HValue* receiver,
SmallMapList* types,
Handle<String> name);
- bool TryLiteralCompare(CompareOperation* expr);
- void HandleLiteralCompareTypeof(CompareOperation* expr,
- Expression* sub_expr,
+ void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
+ Expression* expr,
Handle<String> check);
- void HandleLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil);
+ void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
+ Expression* expr);
HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
HValue* string,
@@ -942,11 +938,6 @@ class HGraphBuilder: public AstVisitor {
HValue* val,
ElementsKind elements_kind,
bool is_store);
- HInstruction* BuildFastElementAccess(HValue* elements,
- HValue* checked_key,
- HValue* val,
- ElementsKind elements_kind,
- bool is_store);
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 446aa3e2de..0ca2d6b4a8 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -89,13 +89,8 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target) {
- Assembler::set_target_address_at(pc_, target);
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- if (host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
+ Assembler::set_target_address_at(pc_, target);
}
@@ -121,10 +116,6 @@ void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
- if (host() != NULL && target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
}
@@ -156,12 +147,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
- if (host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
- }
}
@@ -176,11 +161,6 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Assembler::set_target_address_at(pc_ + 1, target);
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
}
@@ -214,7 +194,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitPointer(target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
@@ -242,7 +222,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
+ StaticVisitor::VisitPointer(heap, target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 66a98841a2..999647487e 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -55,8 +55,6 @@ uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
-// The Probe method needs executable memory, so it uses Heap::CreateCode.
-// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe() {
ASSERT(!initialized_);
ASSERT(supported_ == 0);
@@ -88,23 +86,23 @@ void CpuFeatures::Probe() {
__ pushfd();
__ push(ecx);
__ push(ebx);
- __ mov(ebp, esp);
+ __ mov(ebp, Operand(esp));
// If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
__ pushfd();
__ pop(eax);
- __ mov(edx, eax);
+ __ mov(edx, Operand(eax));
__ xor_(eax, 0x200000); // Flip bit 21.
__ push(eax);
__ popfd();
__ pushfd();
__ pop(eax);
- __ xor_(eax, edx); // Different if CPUID is supported.
+ __ xor_(eax, Operand(edx)); // Different if CPUID is supported.
__ j(not_zero, &cpuid);
// CPUID not supported. Clear the supported features in edx:eax.
- __ xor_(eax, eax);
- __ xor_(edx, edx);
+ __ xor_(eax, Operand(eax));
+ __ xor_(edx, Operand(edx));
__ jmp(&done);
// Invoke CPUID with 1 in eax to get feature information in
@@ -120,13 +118,13 @@ void CpuFeatures::Probe() {
// Move the result from ecx:edx to edx:eax and make sure to mark the
// CPUID feature as supported.
- __ mov(eax, edx);
+ __ mov(eax, Operand(edx));
__ or_(eax, 1 << CPUID);
- __ mov(edx, ecx);
+ __ mov(edx, Operand(ecx));
// Done.
__ bind(&done);
- __ mov(esp, ebp);
+ __ mov(esp, Operand(ebp));
__ pop(ebx);
__ pop(ecx);
__ popfd();
@@ -288,18 +286,6 @@ bool Operand::is_reg(Register reg) const {
&& ((buf_[0] & 0x07) == reg.code()); // register codes match.
}
-
-bool Operand::is_reg_only() const {
- return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
-}
-
-
-Register Operand::reg() const {
- ASSERT(is_reg_only());
- return Register::from_code(buf_[0] & 0x07);
-}
-
-
// -----------------------------------------------------------------------------
// Implementation of Assembler.
@@ -715,13 +701,6 @@ void Assembler::add(Register dst, const Operand& src) {
}
-void Assembler::add(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x01);
- emit_operand(src, dst);
-}
-
-
void Assembler::add(const Operand& dst, const Immediate& x) {
ASSERT(reloc_info_writer.last_pc() != NULL);
EnsureSpace ensure_space(this);
@@ -762,29 +741,25 @@ void Assembler::and_(const Operand& dst, Register src) {
void Assembler::cmpb(const Operand& op, int8_t imm8) {
EnsureSpace ensure_space(this);
- if (op.is_reg(eax)) {
- EMIT(0x3C);
- } else {
- EMIT(0x80);
- emit_operand(edi, op); // edi == 7
- }
+ EMIT(0x80);
+ emit_operand(edi, op); // edi == 7
EMIT(imm8);
}
-void Assembler::cmpb(const Operand& op, Register reg) {
- ASSERT(reg.is_byte_register());
+void Assembler::cmpb(const Operand& dst, Register src) {
+ ASSERT(src.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x38);
- emit_operand(reg, op);
+ emit_operand(src, dst);
}
-void Assembler::cmpb(Register reg, const Operand& op) {
- ASSERT(reg.is_byte_register());
+void Assembler::cmpb(Register dst, const Operand& src) {
+ ASSERT(dst.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x3A);
- emit_operand(reg, op);
+ emit_operand(dst, src);
}
@@ -1094,6 +1069,18 @@ void Assembler::shr_cl(Register dst) {
}
+void Assembler::subb(const Operand& op, int8_t imm8) {
+ EnsureSpace ensure_space(this);
+ if (op.is_reg(eax)) {
+ EMIT(0x2c);
+ } else {
+ EMIT(0x80);
+ emit_operand(ebp, op); // ebp == 5
+ }
+ EMIT(imm8);
+}
+
+
void Assembler::sub(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(5, dst, x);
@@ -1107,6 +1094,14 @@ void Assembler::sub(Register dst, const Operand& src) {
}
+void Assembler::subb(Register dst, const Operand& src) {
+ ASSERT(dst.code() < 4);
+ EnsureSpace ensure_space(this);
+ EMIT(0x2A);
+ emit_operand(dst, src);
+}
+
+
void Assembler::sub(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x29);
@@ -1163,10 +1158,6 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
void Assembler::test_b(const Operand& op, uint8_t imm8) {
- if (op.is_reg_only() && op.reg().code() >= 4) {
- test(op, Immediate(imm8));
- return;
- }
EnsureSpace ensure_space(this);
EMIT(0xF6);
emit_operand(eax, op);
@@ -1187,10 +1178,10 @@ void Assembler::xor_(Register dst, const Operand& src) {
}
-void Assembler::xor_(const Operand& dst, Register src) {
+void Assembler::xor_(const Operand& src, Register dst) {
EnsureSpace ensure_space(this);
EMIT(0x31);
- emit_operand(src, dst);
+ emit_operand(dst, src);
}
@@ -2480,7 +2471,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
return;
}
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(pc_, rmode, data);
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 4dfde5f62f..4698e3ed1b 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -75,8 +75,6 @@ struct Register {
static inline Register FromAllocationIndex(int index);
static Register from_code(int code) {
- ASSERT(code >= 0);
- ASSERT(code < kNumRegisters);
Register r = { code };
return r;
}
@@ -302,6 +300,9 @@ enum ScaleFactor {
class Operand BASE_EMBEDDED {
public:
+ // reg
+ INLINE(explicit Operand(Register reg));
+
// XMM reg
INLINE(explicit Operand(XMMRegister xmm_reg));
@@ -346,16 +347,12 @@ class Operand BASE_EMBEDDED {
// Returns true if this Operand is a wrapper for the specified register.
bool is_reg(Register reg) const;
- // Returns true if this Operand is a wrapper for one register.
- bool is_reg_only() const;
-
- // Asserts that this Operand is a wrapper for one register and returns the
- // register.
- Register reg() const;
-
private:
- // reg
- INLINE(explicit Operand(Register reg));
+ byte buf_[6];
+ // The number of bytes in buf_.
+ unsigned int len_;
+ // Only valid if len_ > 4.
+ RelocInfo::Mode rmode_;
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
@@ -365,15 +362,7 @@ class Operand BASE_EMBEDDED {
inline void set_disp8(int8_t disp);
inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
- byte buf_[6];
- // The number of bytes in buf_.
- unsigned int len_;
- // Only valid if len_ > 4.
- RelocInfo::Mode rmode_;
-
friend class Assembler;
- friend class MacroAssembler;
- friend class LCodeGen;
};
@@ -682,9 +671,7 @@ class Assembler : public AssemblerBase {
void leave();
// Moves
- void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
void mov_b(Register dst, const Operand& src);
- void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
void mov_b(const Operand& dst, int8_t imm8);
void mov_b(const Operand& dst, Register src);
@@ -700,24 +687,17 @@ class Assembler : public AssemblerBase {
void mov(const Operand& dst, Handle<Object> handle);
void mov(const Operand& dst, Register src);
- void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
void movsx_b(Register dst, const Operand& src);
- void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
void movsx_w(Register dst, const Operand& src);
- void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
void movzx_b(Register dst, const Operand& src);
- void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
void movzx_w(Register dst, const Operand& src);
// Conditional moves
void cmov(Condition cc, Register dst, int32_t imm32);
void cmov(Condition cc, Register dst, Handle<Object> handle);
- void cmov(Condition cc, Register dst, Register src) {
- cmov(cc, dst, Operand(src));
- }
void cmov(Condition cc, Register dst, const Operand& src);
// Flag management.
@@ -735,31 +715,24 @@ class Assembler : public AssemblerBase {
void adc(Register dst, int32_t imm32);
void adc(Register dst, const Operand& src);
- void add(Register dst, Register src) { add(dst, Operand(src)); }
void add(Register dst, const Operand& src);
- void add(const Operand& dst, Register src);
- void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
void add(const Operand& dst, const Immediate& x);
void and_(Register dst, int32_t imm32);
void and_(Register dst, const Immediate& x);
- void and_(Register dst, Register src) { and_(dst, Operand(src)); }
void and_(Register dst, const Operand& src);
- void and_(const Operand& dst, Register src);
+ void and_(const Operand& src, Register dst);
void and_(const Operand& dst, const Immediate& x);
- void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
void cmpb(const Operand& op, int8_t imm8);
- void cmpb(Register reg, const Operand& op);
- void cmpb(const Operand& op, Register reg);
+ void cmpb(Register src, const Operand& dst);
+ void cmpb(const Operand& dst, Register src);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
void cmp(Register reg, int32_t imm32);
void cmp(Register reg, Handle<Object> handle);
- void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
void cmp(Register reg, const Operand& op);
- void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
void cmp(const Operand& op, const Immediate& imm);
void cmp(const Operand& op, Handle<Object> handle);
@@ -775,7 +748,6 @@ class Assembler : public AssemblerBase {
// Signed multiply instructions.
void imul(Register src); // edx:eax = eax * src.
- void imul(Register dst, Register src) { imul(dst, Operand(src)); }
void imul(Register dst, const Operand& src); // dst = dst * src.
void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
@@ -792,10 +764,8 @@ class Assembler : public AssemblerBase {
void not_(Register dst);
void or_(Register dst, int32_t imm32);
- void or_(Register dst, Register src) { or_(dst, Operand(src)); }
void or_(Register dst, const Operand& src);
void or_(const Operand& dst, Register src);
- void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
void or_(const Operand& dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8);
@@ -806,42 +776,35 @@ class Assembler : public AssemblerBase {
void sbb(Register dst, const Operand& src);
- void shld(Register dst, Register src) { shld(dst, Operand(src)); }
void shld(Register dst, const Operand& src);
void shl(Register dst, uint8_t imm8);
void shl_cl(Register dst);
- void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
void shrd(Register dst, const Operand& src);
void shr(Register dst, uint8_t imm8);
void shr_cl(Register dst);
- void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
+ void subb(const Operand& dst, int8_t imm8);
+ void subb(Register dst, const Operand& src);
void sub(const Operand& dst, const Immediate& x);
- void sub(Register dst, Register src) { sub(dst, Operand(src)); }
void sub(Register dst, const Operand& src);
void sub(const Operand& dst, Register src);
void test(Register reg, const Immediate& imm);
- void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
- void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
- void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
void xor_(Register dst, const Operand& src);
- void xor_(const Operand& dst, Register src);
- void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
+ void xor_(const Operand& src, Register dst);
void xor_(const Operand& dst, const Immediate& x);
// Bit operations.
void bt(const Operand& dst, Register src);
- void bts(Register dst, Register src) { bts(Operand(dst), src); }
void bts(const Operand& dst, Register src);
// Miscellaneous
@@ -872,7 +835,6 @@ class Assembler : public AssemblerBase {
void call(Label* L);
void call(byte* entry, RelocInfo::Mode rmode);
int CallSize(const Operand& adr);
- void call(Register reg) { call(Operand(reg)); }
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code,
@@ -883,7 +845,6 @@ class Assembler : public AssemblerBase {
// unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(byte* entry, RelocInfo::Mode rmode);
- void jmp(Register reg) { jmp(Operand(reg)); }
void jmp(const Operand& adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
@@ -968,7 +929,6 @@ class Assembler : public AssemblerBase {
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
- void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
void cvtsi2sd(XMMRegister dst, const Operand& src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtsd2ss(XMMRegister dst, XMMRegister src);
@@ -1009,14 +969,12 @@ class Assembler : public AssemblerBase {
void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src);
- void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src);
- void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
- void movd(const Operand& dst, XMMRegister src);
+ void movd(const Operand& src, XMMRegister dst);
void movsd(XMMRegister dst, XMMRegister src);
void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
+ void movss(const Operand& src, XMMRegister dst);
void movss(XMMRegister dst, XMMRegister src);
void pand(XMMRegister dst, XMMRegister src);
@@ -1029,17 +987,11 @@ class Assembler : public AssemblerBase {
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
- void pextrd(Register dst, XMMRegister src, int8_t offset) {
- pextrd(Operand(dst), src, offset);
- }
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
- void pinsrd(XMMRegister dst, Register src, int8_t offset) {
- pinsrd(dst, Operand(src), offset);
- }
void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
// Parallel XMM operations.
- void movntdqa(XMMRegister dst, const Operand& src);
+ void movntdqa(XMMRegister src, const Operand& dst);
void movntdq(const Operand& dst, XMMRegister src);
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
@@ -1093,9 +1045,6 @@ class Assembler : public AssemblerBase {
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
-
protected:
bool emit_debug_code() const { return emit_debug_code_; }
@@ -1108,8 +1057,9 @@ class Assembler : public AssemblerBase {
byte* addr_at(int pos) { return buffer_ + pos; }
-
private:
+ byte byte_at(int pos) { return buffer_[pos]; }
+ void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 53ade3a6c9..310ea3d123 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects eax to contain the number of arguments
// including the receiver and the extra arguments.
- __ add(eax, Immediate(num_extra_args + 1));
+ __ add(Operand(eax), Immediate(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -80,34 +80,25 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- edi: constructor function
// -----------------------------------
- Label slow, non_function_call;
+ Label non_function_call;
// Check that function is not a smi.
__ JumpIfSmi(edi, &non_function_call);
// Check that function is a JSFunction.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
+ __ j(not_equal, &non_function_call);
// Jump to the function-specific construct stub.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
__ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
+ __ jmp(Operand(ebx));
// edi: called object
// eax: number of arguments
- // ecx: object map
- Label do_call;
- __ bind(&slow);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
__ bind(&non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
Handle<Code> arguments_adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ SetCallKind(ecx, CALL_AS_METHOD);
@@ -122,271 +113,264 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ASSERT(!is_api_function || !count_constructions);
// Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
+ __ EnterConstructFrame();
- // Store a smi-tagged arguments count on the stack.
- __ SmiTag(eax);
- __ push(eax);
+ // Store a smi-tagged arguments count on the stack.
+ __ SmiTag(eax);
+ __ push(eax);
- // Push the function to invoke on the stack.
- __ push(edi);
+ // Push the function to invoke on the stack.
+ __ push(edi);
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
+ // Try to allocate the object without transitioning into C code. If any of the
+ // preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(not_equal, &rt_call);
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+ __ j(not_equal, &rt_call);
#endif
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- __ JumpIfSmi(eax, &rt_call);
- // edi: constructor
- // eax: initial map (if proven valid below)
- __ CmpObjectType(eax, MAP_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // edi: constructor
- // eax: initial map
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
+ // Verified that the constructor is a JSFunction.
+ // Load the initial map and verify that it is in fact a map.
+ // edi: constructor
+ __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi
+ __ JumpIfSmi(eax, &rt_call);
+ // edi: constructor
+ // eax: initial map (if proven valid below)
+ __ CmpObjectType(eax, MAP_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see comments
+ // in Runtime_NewObject in runtime.cc). In which case the initial map's
+ // instance type would be JS_FUNCTION_TYPE.
+ // edi: constructor
+ // eax: initial map
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
+
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset));
+ __ j(not_zero, &allocate);
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ dec_b(FieldOperand(ecx,
- SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
-
- __ push(eax);
- __ push(edi);
+ __ push(eax);
+ __ push(edi);
- __ push(edi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ push(edi); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
- __ pop(edi);
- __ pop(eax);
+ __ pop(edi);
+ __ pop(eax);
- __ bind(&allocate);
- }
+ __ bind(&allocate);
+ }
- // Now allocate the JSObject on the heap.
- // edi: constructor
- // eax: initial map
- __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ shl(edi, kPointerSizeLog2);
- __ AllocateInNewSpace(
- edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ mov(Operand(ebx, JSObject::kMapOffset), eax);
- Factory* factory = masm->isolate()->factory();
- __ mov(ecx, factory->empty_fixed_array());
- __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
- __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
- // Set extra fields in the newly allocated object.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
- __ mov(edx, factory->undefined_value());
+ // Now allocate the JSObject on the heap.
+ // edi: constructor
+ // eax: initial map
+ __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+ __ shl(edi, kPointerSizeLog2);
+ __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+ // Allocated the JSObject, now initialize the fields.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+ Factory* factory = masm->isolate()->factory();
+ __ mov(ecx, factory->empty_fixed_array());
+ __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+ __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+ // Set extra fields in the newly allocated object.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ { Label loop, entry;
+ // To allow for truncation.
if (count_constructions) {
- __ movzx_b(esi,
- FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(esi,
- Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
- // esi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(esi, edi);
- __ Assert(less_equal,
- "Unexpected number of pre-allocated property fields.");
- }
- __ InitializeFieldsWithFiller(ecx, esi, edx);
__ mov(edx, factory->one_pointer_filler_map());
- }
- __ InitializeFieldsWithFiller(ecx, edi, edx);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ or_(ebx, Immediate(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- // Calculate the total number of properties described by the map.
- __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ movzx_b(ecx,
- FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
- __ add(edx, ecx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
- __ sub(edx, ecx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // ebx: JSObject
- // edi: start of next object (will be start of FixedArray)
- // edx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- edx,
- edi,
- ecx,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // ebx: JSObject
- // edi: FixedArray
- // edx: number of elements
- // ecx: start of next object
- __ mov(eax, factory->fixed_array_map());
- __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
- __ SmiTag(edx);
- __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
-
- // Initialize the fields to undefined.
- // ebx: JSObject
- // edi: FixedArray
- // ecx: start of next object
- { Label loop, entry;
+ } else {
__ mov(edx, factory->undefined_value());
- __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(Operand(eax, 0), edx);
- __ add(eax, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(eax, ecx);
- __ j(below, &loop);
}
+ __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(Operand(ecx, 0), edx);
+ __ add(Operand(ecx), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(ecx, Operand(edi));
+ __ j(less, &loop);
+ }
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // ebx: JSObject
- // edi: FixedArray
- __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag
- __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+ // Add the object tag to make the JSObject real, so that we can continue and
+ // jump into the continuation code at any time from now on. Any failures
+ // need to undo the allocation, so that the heap is in a consistent state
+ // and verifiable.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ __ or_(Operand(ebx), Immediate(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed.
+ // Allocate and initialize a FixedArray if it is.
+ // eax: initial map
+ // ebx: JSObject
+ // edi: start of next object
+ // Calculate the total number of properties described by the map.
+ __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ add(edx, Operand(ecx));
+ // Calculate unused properties past the end of the in-object properties.
+ __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
+ __ sub(edx, Operand(ecx));
+ // Done if no extra properties are to be allocated.
+ __ j(zero, &allocated);
+ __ Assert(positive, "Property allocation count failed.");
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // ebx: JSObject
+ // edi: start of next object (will be start of FixedArray)
+ // edx: number of elements in properties array
+ __ AllocateInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ edx,
+ edi,
+ ecx,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
+
+ // Initialize the FixedArray.
+ // ebx: JSObject
+ // edi: FixedArray
+ // edx: number of elements
+ // ecx: start of next object
+ __ mov(eax, factory->fixed_array_map());
+ __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
+ __ SmiTag(edx);
+ __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
+
+ // Initialize the fields to undefined.
+ // ebx: JSObject
+ // edi: FixedArray
+ // ecx: start of next object
+ { Label loop, entry;
+ __ mov(edx, factory->undefined_value());
+ __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(Operand(eax, 0), edx);
+ __ add(Operand(eax), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(eax, Operand(ecx));
+ __ j(below, &loop);
+ }
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // ebx: JSObject
+ // edi: FixedArray
+ __ or_(Operand(edi), Immediate(kHeapObjectTag)); // add the heap tag
+ __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
- // Continue with JSObject being successfully allocated
- // ebx: JSObject
- __ jmp(&allocated);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // ebx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(ebx);
- }
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject
+ __ jmp(&allocated);
- // Allocate the new receiver object using the runtime call.
- __ bind(&rt_call);
- // Must restore edi (constructor) before calling runtime.
- __ mov(edi, Operand(esp, 0));
- // edi: function (constructor)
- __ push(edi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(ebx, eax); // store result in ebx
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // ebx: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(ebx);
+ }
- // New object allocated.
- // ebx: newly allocated object
- __ bind(&allocated);
- // Retrieve the function from the stack.
- __ pop(edi);
+ // Allocate the new receiver object using the runtime call.
+ __ bind(&rt_call);
+ // Must restore edi (constructor) before calling runtime.
+ __ mov(edi, Operand(esp, 0));
+ // edi: function (constructor)
+ __ push(edi);
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(ebx, Operand(eax)); // store result in ebx
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
- __ SmiUntag(eax);
+ // New object allocated.
+ // ebx: newly allocated object
+ __ bind(&allocated);
+ // Retrieve the function from the stack.
+ __ pop(edi);
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
+ // Retrieve smi-tagged arguments count from the stack.
+ __ mov(eax, Operand(esp, 0));
+ __ SmiUntag(eax);
- // Setup pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(ebx);
+ __ push(ebx);
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, eax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Call the function.
- if (is_api_function) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Setup pointer to last argument.
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(ecx, Operand(eax));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(ebx, ecx, times_4, 0));
+ __ bind(&entry);
+ __ dec(ecx);
+ __ j(greater_equal, &loop);
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Call the function.
+ if (is_api_function) {
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver);
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &exit);
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(eax, &use_receiver);
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &exit);
- // Restore the arguments count and leave the construct frame.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count.
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0));
- // Leave construct frame.
- }
+ // Restore the arguments count and leave the construct frame.
+ __ bind(&exit);
+ __ mov(ebx, Operand(esp, kPointerSize)); // get arguments count
+ __ LeaveConstructFrame();
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
@@ -415,58 +399,57 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
- // Clear the context before we push it when entering the internal frame.
+ // Clear the context before we push it when entering the JS frame.
__ Set(esi, Immediate(0));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Enter an internal frame.
+ __ EnterInternalFrame();
- // Load the previous frame pointer (ebx) to access C arguments
- __ mov(ebx, Operand(ebp, 0));
+ // Load the previous frame pointer (ebx) to access C arguments
+ __ mov(ebx, Operand(ebp, 0));
- // Get the function from the frame and setup the context.
- __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
- __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
+ // Get the function from the frame and setup the context.
+ __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+ __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
- // Push the function and the receiver onto the stack.
- __ push(ecx);
- __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+ // Push the function and the receiver onto the stack.
+ __ push(ecx);
+ __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
- // Load the number of arguments and setup pointer to the arguments.
- __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
- __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+ // Load the number of arguments and setup pointer to the arguments.
+ __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+ __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
- // Copy arguments to the stack in a loop.
- Label loop, entry;
- __ Set(ecx, Immediate(0));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
- __ push(Operand(edx, 0)); // dereference handle
- __ inc(ecx);
- __ bind(&entry);
- __ cmp(ecx, eax);
- __ j(not_equal, &loop);
-
- // Get the function from the stack and call it.
- // kPointerSize for the receiver.
- __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
-
- // Invoke the code.
- if (is_construct) {
- __ call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Copy arguments to the stack in a loop.
+ Label loop, entry;
+ __ Set(ecx, Immediate(0));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
+ __ push(Operand(edx, 0)); // dereference handle
+ __ inc(Operand(ecx));
+ __ bind(&entry);
+ __ cmp(ecx, Operand(eax));
+ __ j(not_equal, &loop);
+
+ // Get the function from the stack and call it.
+ __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize)); // +1 ~ receiver
- // Exit the internal frame. Notice that this also removes the empty.
- // context and the function left on the stack by the code
- // invocation.
+ // Invoke the code.
+ if (is_construct) {
+ __ call(masm->isolate()->builtins()->JSConstructCall(),
+ RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
- __ ret(kPointerSize); // Remove receiver.
+
+ // Exit the JS frame. Notice that this also removes the empty
+ // context and the function left on the stack by the code
+ // invocation.
+ __ LeaveInternalFrame();
+ __ ret(1 * kPointerSize); // remove receiver
}
@@ -481,68 +464,68 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Enter an internal frame.
+ __ EnterInternalFrame();
- // Push a copy of the function.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
+ // Push a copy of the function.
+ __ push(edi);
+ // Push call kind information.
+ __ push(ecx);
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ push(edi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore receiver.
+ __ pop(edi);
- // Tear down internal frame.
- }
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+ __ jmp(Operand(eax));
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Enter an internal frame.
+ __ EnterInternalFrame();
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
+ // Push a copy of the function onto the stack.
+ __ push(edi);
+ // Push call kind information.
+ __ push(ecx);
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
+ __ push(edi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore receiver.
+ __ pop(edi);
- // Tear down internal frame.
- }
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+ __ jmp(Operand(eax));
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Enter an internal frame.
+ __ EnterInternalFrame();
- // Pass the function and deoptimization type to the runtime system.
- __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ // Pass the function and deoptimization type to the runtime system.
+ __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- // Tear down internal frame.
- }
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
// Get the full codegen state from the stack and untag it.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
@@ -583,10 +566,9 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
+ __ EnterInternalFrame();
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ __ LeaveInternalFrame();
__ popad();
__ ret(0);
}
@@ -597,7 +579,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
{ Label done;
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
__ j(not_zero, &done);
__ pop(ebx);
__ push(Immediate(factory->undefined_value()));
@@ -649,21 +631,18 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(above_equal, &shift_arguments);
__ bind(&convert_to_object);
+ __ EnterInternalFrame(); // In order to preserve argument count.
+ __ SmiTag(eax);
+ __ push(eax);
- { // In order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(eax);
- __ push(eax);
-
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(ebx, eax);
- __ Set(edx, Immediate(0)); // restore
-
- __ pop(eax);
- __ SmiUntag(eax);
- }
+ __ push(ebx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(ebx, eax);
+ __ Set(edx, Immediate(0)); // restore
+ __ pop(eax);
+ __ SmiUntag(eax);
+ __ LeaveInternalFrame();
// Restore the function to edi.
__ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
__ jmp(&patch_receiver);
@@ -716,11 +695,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
- __ test(edx, edx);
+ __ test(edx, Operand(edx));
__ j(zero, &function);
__ Set(ebx, Immediate(0));
__ SetCallKind(ecx, CALL_AS_METHOD);
- __ cmp(edx, Immediate(1));
+ __ cmp(Operand(edx), Immediate(1));
__ j(not_equal, &non_proxy);
__ pop(edx); // return address
@@ -747,13 +726,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ SmiUntag(ebx);
__ SetCallKind(ecx, CALL_AS_METHOD);
- __ cmp(eax, ebx);
+ __ cmp(eax, Operand(ebx));
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
ParameterCount expected(0);
- __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
+ __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
@@ -761,156 +740,155 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static const int kArgumentsOffset = 2 * kPointerSize;
static const int kReceiverOffset = 3 * kPointerSize;
static const int kFunctionOffset = 4 * kPointerSize;
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
-
- __ push(Operand(ebp, kFunctionOffset)); // push this
- __ push(Operand(ebp, kArgumentsOffset)); // push arguments
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, esp);
- __ sub(ecx, edi);
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, eax);
- __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(ebp, 4 * kPointerSize)); // push this
- __ push(eax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
-
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(eax); // limit
- __ push(Immediate(0)); // index
-
- // Get the receiver.
- __ mov(ebx, Operand(ebp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &push_receiver);
-
- Factory* factory = masm->isolate()->factory();
-
- // Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &push_receiver);
+ __ EnterInternalFrame();
+
+ __ push(Operand(ebp, kFunctionOffset)); // push this
+ __ push(Operand(ebp, kArgumentsOffset)); // push arguments
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already be overflowed
+ // here which will cause ecx to become negative.
+ __ mov(ecx, Operand(esp));
+ __ sub(ecx, Operand(edi));
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, Operand(eax));
+ __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, Operand(edx));
+ __ j(greater, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ push(Operand(ebp, 4 * kPointerSize)); // push this
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ push(eax); // limit
+ __ push(Immediate(0)); // index
+
+ // Get the receiver.
+ __ mov(ebx, Operand(ebp, kReceiverOffset));
+
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ mov(edi, Operand(ebp, kFunctionOffset));
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &push_receiver);
- // Compute the receiver in non-strict mode.
- // Call ToObject on the receiver if it is not an object, or use the
- // global object if it is null or undefined.
- __ JumpIfSmi(ebx, &call_to_object);
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_receiver);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_receiver);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &push_receiver);
+ // Change context eagerly to get the right global object if necessary.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- __ bind(&call_to_object);
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(ebx, eax);
- __ jmp(&push_receiver);
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_receiver;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &push_receiver);
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(ebx);
+ Factory* factory = masm->isolate()->factory();
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ mov(eax, Operand(ebp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments
+ // Do not transform the receiver for natives (shared already in ecx).
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &push_receiver);
+
+ // Compute the receiver in non-strict mode.
+ // Call ToObject on the receiver if it is not an object, or use the
+ // global object if it is null or undefined.
+ __ JumpIfSmi(ebx, &call_to_object);
+ __ cmp(ebx, factory->null_value());
+ __ j(equal, &use_global_receiver);
+ __ cmp(ebx, factory->undefined_value());
+ __ j(equal, &use_global_receiver);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &push_receiver);
+
+ __ bind(&call_to_object);
+ __ push(ebx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(ebx, Operand(eax));
+ __ jmp(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ mov(ebx, FieldOperand(esi, kGlobalOffset));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ __ bind(&push_receiver);
+ __ push(ebx);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ mov(eax, Operand(ebp, kIndexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
- // Push the nth argument.
- __ push(eax);
+ // Push the nth argument.
+ __ push(eax);
- // Update the index on the stack and in register eax.
- __ mov(eax, Operand(ebp, kIndexOffset));
- __ add(eax, Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, kIndexOffset), eax);
+ // Update the index on the stack and in register eax.
+ __ mov(eax, Operand(ebp, kIndexOffset));
+ __ add(Operand(eax), Immediate(1 << kSmiTagSize));
+ __ mov(Operand(ebp, kIndexOffset), eax);
- __ bind(&entry);
- __ cmp(eax, Operand(ebp, kLimitOffset));
- __ j(not_equal, &loop);
+ __ bind(&entry);
+ __ cmp(eax, Operand(ebp, kLimitOffset));
+ __ j(not_equal, &loop);
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(eax);
- __ SmiUntag(eax);
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Invoke the function.
+ Label call_proxy;
+ ParameterCount actual(eax);
+ __ SmiUntag(eax);
+ __ mov(edi, Operand(ebp, kFunctionOffset));
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &call_proxy);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
- frame_scope.GenerateLeaveFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ __ LeaveInternalFrame();
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(edi); // add function proxy as last argument
- __ inc(eax);
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ // Invoke the function proxy.
+ __ bind(&call_proxy);
+ __ push(edi); // add function proxy as last argument
+ __ inc(eax);
+ __ Set(ebx, Immediate(0));
+ __ SetCallKind(ecx, CALL_AS_METHOD);
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+ __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
- // Leave internal frame.
- }
+ __ LeaveInternalFrame();
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
}
@@ -1005,9 +983,9 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ jmp(&entry);
__ bind(&loop);
__ mov(Operand(scratch1, 0), factory->the_hole_value());
- __ add(scratch1, Immediate(kPointerSize));
+ __ add(Operand(scratch1), Immediate(kPointerSize));
__ bind(&entry);
- __ cmp(scratch1, scratch2);
+ __ cmp(scratch1, Operand(scratch2));
__ j(below, &loop);
}
}
@@ -1104,7 +1082,7 @@ static void AllocateJSArray(MacroAssembler* masm,
__ bind(&loop);
__ stos();
__ bind(&entry);
- __ cmp(edi, elements_array_end);
+ __ cmp(edi, Operand(elements_array_end));
__ j(below, &loop);
__ bind(&done);
}
@@ -1142,7 +1120,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ push(eax);
// Check for array construction with zero arguments.
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
__ j(not_zero, &argc_one_or_more);
__ bind(&empty_array);
@@ -1169,7 +1147,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ j(not_equal, &argc_two_or_more);
STATIC_ASSERT(kSmiTag == 0);
__ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
- __ test(ecx, ecx);
+ __ test(ecx, Operand(ecx));
__ j(not_zero, &not_empty_array);
// The single argument passed is zero, so we jump to the code above used to
@@ -1182,7 +1160,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ mov(eax, Operand(esp, i * kPointerSize));
__ mov(Operand(esp, (i + 1) * kPointerSize), eax);
}
- __ add(esp, Immediate(2 * kPointerSize)); // Drop two stack slots.
+ __ add(Operand(esp), Immediate(2 * kPointerSize)); // Drop two stack slots.
__ push(Immediate(0)); // Treat this as a call with argc of zero.
__ jmp(&empty_array);
@@ -1272,7 +1250,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ bind(&loop);
__ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
__ mov(Operand(edx, 0), eax);
- __ add(edx, Immediate(kPointerSize));
+ __ add(Operand(edx), Immediate(kPointerSize));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
@@ -1378,14 +1356,14 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
+ __ cmp(edi, Operand(ecx));
__ Assert(equal, "Unexpected String function");
}
// Load the first argument into eax and get rid of the rest
// (including the receiver).
Label no_arguments;
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
__ j(zero, &no_arguments);
__ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
__ pop(ecx);
@@ -1461,13 +1439,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Invoke the conversion builtin and put the result into ebx.
__ bind(&convert_argument);
__ IncrementCounter(counters->string_ctor_conversions(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edi); // Preserve the function.
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(edi);
- }
+ __ EnterInternalFrame();
+ __ push(edi); // Preserve the function.
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ pop(edi);
+ __ LeaveInternalFrame();
__ mov(ebx, eax);
__ jmp(&argument_is_string);
@@ -1484,18 +1461,17 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(ebx);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- }
+ __ EnterInternalFrame();
+ __ push(ebx);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ __ LeaveInternalFrame();
__ ret(0);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
- __ mov(ebp, esp);
+ __ mov(ebp, Operand(esp));
// Store the arguments adaptor context sentinel.
__ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
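
Later hunks in this file recognize the frame built here by re-reading that sentinel from the context slot. A minimal sketch of the check, assuming the standard ia32 frame layout (context slot at fp - kPointerSize) and the 32-bit smi encoding value << 1:

#include <cstdint>

// Illustration only, not the real frame walker.
static bool LooksLikeArgumentsAdaptorFrame(const intptr_t* fp,
                                           int adaptor_marker) {
  // Smi::FromInt(v) on ia32 is v << 1 (kSmiTag == 0, kSmiTagSize == 1).
  return fp[-1] == (static_cast<intptr_t>(adaptor_marker) << 1);
}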
@@ -1539,7 +1515,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
Label enough, too_few;
- __ cmp(eax, ebx);
+ __ cmp(eax, Operand(ebx));
__ j(less, &too_few);
__ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
__ j(equal, &dont_adapt_arguments);
@@ -1557,8 +1533,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&copy);
__ inc(edi);
__ push(Operand(eax, 0));
- __ sub(eax, Immediate(kPointerSize));
- __ cmp(edi, ebx);
+ __ sub(Operand(eax), Immediate(kPointerSize));
+ __ cmp(edi, Operand(ebx));
__ j(less, &copy);
__ jmp(&invoke);
}
@@ -1571,17 +1547,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(edi, Operand(ebp, eax, times_4, offset));
// ebx = expected - actual.
- __ sub(ebx, eax);
+ __ sub(ebx, Operand(eax));
// eax = -actual - 1
__ neg(eax);
- __ sub(eax, Immediate(1));
+ __ sub(Operand(eax), Immediate(1));
Label copy;
__ bind(&copy);
__ inc(eax);
__ push(Operand(edi, 0));
- __ sub(edi, Immediate(kPointerSize));
- __ test(eax, eax);
+ __ sub(Operand(edi), Immediate(kPointerSize));
+ __ test(eax, Operand(eax));
__ j(not_zero, &copy);
// Fill remaining expected arguments with undefined values.
@@ -1589,7 +1565,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&fill);
__ inc(eax);
__ push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ cmp(eax, ebx);
+ __ cmp(eax, Operand(ebx));
__ j(less, &fill);
}
@@ -1597,7 +1573,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&invoke);
// Restore function pointer.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ call(edx);
+ __ call(Operand(edx));
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
@@ -1607,13 +1583,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  // Don't adapt arguments.
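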
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ jmp(edx);
+ __ jmp(Operand(edx));
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
CpuFeatures::TryForceFeatureScope scope(SSE2);
- if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) {
+ if (!CpuFeatures::IsSupported(SSE2)) {
__ Abort("Unreachable code: Cannot optimize without SSE2 support.");
return;
}
@@ -1640,16 +1616,15 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- }
+ __ EnterInternalFrame();
+ __ push(eax);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ LeaveInternalFrame();
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
Label skip;
- __ cmp(eax, Immediate(Smi::FromInt(-1)));
+ __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
@@ -1663,9 +1638,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ TailCallStub(&stub);
- if (FLAG_debug_code) {
- __ Abort("Unreachable code: returned from tail call.");
- }
+ __ Abort("Unreachable code: returned from tail call.");
__ bind(&ok);
__ ret(0);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 1e886e202b..1009aaf573 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -49,7 +49,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&check_heap_number);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, Immediate(factory->heap_number_map()));
+ __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
__ j(not_equal, &call_builtin, Label::kNear);
__ ret(0);
@@ -150,7 +150,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
// Return and remove the on-stack parameter.
- __ mov(esi, eax);
+ __ mov(esi, Operand(eax));
__ ret(1 * kPointerSize);
// Need to collect. Call into runtime system.
@@ -159,77 +159,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + (1 * kPointerSize)]: function
- // [esp + (2 * kPointerSize)]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function or sentinel from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Get the serialized scope info from the stack.
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
-
- // Setup the object header.
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- factory->block_context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // If this block context is nested in the global context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ cmp(ecx, 0);
- __ Assert(equal, message);
- }
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
- __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Setup the fixed slots.
- __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
- __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
- __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
-
- // Copy the global object from the previous context.
- __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
- __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);
-
- // Initialize the rest of the slots to the hole value.
- if (slots_ == 1) {
- __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
- factory->the_hole_value());
- } else {
- __ mov(ebx, factory->the_hole_value());
- for (int i = 0; i < slots_; i++) {
- __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ mov(esi, eax);
- __ ret(2 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@@ -310,8 +239,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
Label patch;
Factory* factory = masm->isolate()->factory();
const Register argument = eax;
@@ -409,41 +336,6 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
}
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ pushad();
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(Operand(esp, i * kDoubleSize), reg);
- }
- }
- const int argument_count = 1;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, ecx);
- __ mov(Operand(esp, 0 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(reg, Operand(esp, i * kDoubleSize));
- }
- __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- }
- __ popad();
- __ ret(0);
-}
-
-
void ToBooleanStub::CheckOddball(MacroAssembler* masm,
Type type,
Heap::RootListIndex value,
@@ -578,27 +470,27 @@ static void IntegerConvert(MacroAssembler* masm,
// Check whether the exponent is too big for a 64 bit signed integer.
static const uint32_t kTooBigExponent =
(HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(scratch2, Immediate(kTooBigExponent));
+ __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
__ j(greater_equal, conversion_failure);
// Load x87 register with heap number.
__ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
// Reserve space for 64 bit answer.
- __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
+ __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
// Do conversion, which cannot fail because we checked the exponent.
__ fisttp_d(Operand(esp, 0));
__ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
+ __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
} else {
// Load ecx with zero. We use this either for the final shift or
// for the answer.
- __ xor_(ecx, ecx);
+ __ xor_(ecx, Operand(ecx));
// Check whether the exponent matches a 32 bit signed int that cannot be
// represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
// exponent is 30 (biased). This is the exponent that we are fastest at and
// also the highest exponent we can handle here.
const uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(scratch2, Immediate(non_smi_exponent));
+ __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
// If we have a match of the int32-but-not-Smi exponent then skip some
// logic.
__ j(equal, &right_exponent, Label::kNear);
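
For concreteness, the two exponent thresholds compared above can be computed directly; a small sketch assuming the IEEE-754 double layout used here (exponent bias 1023, exponent field starting at bit 20 of the high word):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kExponentBias = 1023;
  const uint32_t kExponentShift = 20;
  // Exponent at which the value no longer fits a 64-bit signed integer.
  const uint32_t kTooBigExponent = (kExponentBias + 63) << kExponentShift;
  // Exponent of a 32-bit integer too large to be a smi (1.xxx * 2^30).
  const uint32_t kNonSmiExponent = (kExponentBias + 30) << kExponentShift;
  std::printf("%#x %#x\n", kTooBigExponent, kNonSmiExponent);
  // Prints 0x43e00000 0x41d00000.
  return 0;
}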
@@ -611,7 +503,7 @@ static void IntegerConvert(MacroAssembler* masm,
// >>> operator has a tendency to generate numbers with an exponent of 31.
const uint32_t big_non_smi_exponent =
(HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(scratch2, Immediate(big_non_smi_exponent));
+ __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
__ j(not_equal, conversion_failure);
// We have the big exponent, typically from >>>. This means the number is
// in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
@@ -630,9 +522,9 @@ static void IntegerConvert(MacroAssembler* masm,
// Shift down 21 bits to get the most significant 11 bits or the low
// mantissa word.
__ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, scratch2);
+ __ or_(ecx, Operand(scratch2));
// We have the answer in ecx, but we may need to negate it.
- __ test(scratch, scratch);
+ __ test(scratch, Operand(scratch));
__ j(positive, &done, Label::kNear);
__ neg(ecx);
__ jmp(&done, Label::kNear);
@@ -646,14 +538,14 @@ static void IntegerConvert(MacroAssembler* masm,
// it rounds to zero.
const uint32_t zero_exponent =
(HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(scratch2, Immediate(zero_exponent));
+ __ sub(Operand(scratch2), Immediate(zero_exponent));
// ecx already has a Smi zero.
__ j(less, &done, Label::kNear);
// We have a shifted exponent between 0 and 30 in scratch2.
__ shr(scratch2, HeapNumber::kExponentShift);
__ mov(ecx, Immediate(30));
- __ sub(ecx, scratch2);
+ __ sub(ecx, Operand(scratch2));
__ bind(&right_exponent);
// Here ecx is the shift, scratch is the exponent word.
@@ -673,19 +565,19 @@ static void IntegerConvert(MacroAssembler* masm,
// Shift down 22 bits to get the most significant 10 bits or the low
// mantissa word.
__ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, scratch);
+ __ or_(scratch2, Operand(scratch));
// Move down according to the exponent.
__ shr_cl(scratch2);
// Now the unsigned answer is in scratch2. We need to move it to ecx and
// we may need to fix the sign.
Label negative;
- __ xor_(ecx, ecx);
+ __ xor_(ecx, Operand(ecx));
__ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
__ j(greater, &negative, Label::kNear);
__ mov(ecx, scratch2);
__ jmp(&done, Label::kNear);
__ bind(&negative);
- __ sub(ecx, scratch2);
+ __ sub(ecx, Operand(scratch2));
__ bind(&done);
}
}
@@ -787,13 +679,13 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
__ JumpIfNotSmi(eax, non_smi, non_smi_near);
// We can't handle -0 with smis, so use a type transition for that case.
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
__ j(zero, slow, slow_near);
// Try optimistic subtraction '0 - value', saving operand in eax for undo.
- __ mov(edx, eax);
+ __ mov(edx, Operand(eax));
__ Set(eax, Immediate(0));
- __ sub(eax, edx);
+ __ sub(eax, Operand(edx));
__ j(overflow, undo, undo_near);
__ ret(0);
}
@@ -814,7 +706,7 @@ void UnaryOpStub::GenerateSmiCodeBitNot(
void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
- __ mov(eax, edx);
+ __ mov(eax, Operand(edx));
}
@@ -868,7 +760,7 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
Immediate(HeapNumber::kSignMask)); // Flip sign.
} else {
- __ mov(edx, eax);
+ __ mov(edx, Operand(eax));
// edx: operand
Label slow_allocate_heapnumber, heapnumber_allocated;
@@ -876,12 +768,11 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated, Label::kNear);
__ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ pop(edx);
- }
+ __ EnterInternalFrame();
+ __ push(edx);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ pop(edx);
+ __ LeaveInternalFrame();
__ bind(&heapnumber_allocated);
// eax: allocated 'empty' number
@@ -924,16 +815,15 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the original HeapNumber on the stack. The integer value can't
- // be stored since it's untagged and not in the smi range (so we can't
- // smi-tag it). We'll recalculate the value after the GC instead.
- __ push(ebx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- // New HeapNumber is in eax.
- __ pop(edx);
- }
+ __ EnterInternalFrame();
+ // Push the original HeapNumber on the stack. The integer value can't
+ // be stored since it's untagged and not in the smi range (so we can't
+ // smi-tag it). We'll recalculate the value after the GC instead.
+ __ push(ebx);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ // New HeapNumber is in eax.
+ __ pop(edx);
+ __ LeaveInternalFrame();
// IntegerConvert uses ebx and edi as scratch registers.
// This conversion won't go slow-case.
IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
@@ -943,7 +833,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
}
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ecx);
+ __ cvtsi2sd(xmm0, Operand(ecx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ push(ecx);
@@ -1057,10 +947,6 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -1136,7 +1022,7 @@ void BinaryOpStub::GenerateSmiCode(
// eax in case the result is not a smi.
ASSERT(!left.is(ecx) && !right.is(ecx));
__ mov(ecx, right);
- __ or_(right, left); // Bitwise or is commutative.
+ __ or_(right, Operand(left)); // Bitwise or is commutative.
combined = right;
break;
@@ -1148,7 +1034,7 @@ void BinaryOpStub::GenerateSmiCode(
case Token::DIV:
case Token::MOD:
__ mov(combined, right);
- __ or_(combined, left);
+ __ or_(combined, Operand(left));
break;
case Token::SHL:
@@ -1158,7 +1044,7 @@ void BinaryOpStub::GenerateSmiCode(
// for the smi check register.
ASSERT(!left.is(ecx) && !right.is(ecx));
__ mov(ecx, right);
- __ or_(right, left);
+ __ or_(right, Operand(left));
combined = right;
break;
@@ -1181,12 +1067,12 @@ void BinaryOpStub::GenerateSmiCode(
case Token::BIT_XOR:
ASSERT(right.is(eax));
- __ xor_(right, left); // Bitwise xor is commutative.
+ __ xor_(right, Operand(left)); // Bitwise xor is commutative.
break;
case Token::BIT_AND:
ASSERT(right.is(eax));
- __ and_(right, left); // Bitwise and is commutative.
+ __ and_(right, Operand(left)); // Bitwise and is commutative.
break;
case Token::SHL:
@@ -1235,12 +1121,12 @@ void BinaryOpStub::GenerateSmiCode(
case Token::ADD:
ASSERT(right.is(eax));
- __ add(right, left); // Addition is commutative.
+ __ add(right, Operand(left)); // Addition is commutative.
__ j(overflow, &use_fp_on_smis);
break;
case Token::SUB:
- __ sub(left, right);
+ __ sub(left, Operand(right));
__ j(overflow, &use_fp_on_smis);
__ mov(eax, left);
break;
@@ -1254,7 +1140,7 @@ void BinaryOpStub::GenerateSmiCode(
// Remove tag from one of the operands (but keep sign).
__ SmiUntag(right);
// Do multiplication.
- __ imul(right, left); // Multiplication is commutative.
+ __ imul(right, Operand(left)); // Multiplication is commutative.
__ j(overflow, &use_fp_on_smis);
// Check for negative zero result. Use combined = left | right.
__ NegativeZeroTest(right, combined, &use_fp_on_smis);
@@ -1265,7 +1151,7 @@ void BinaryOpStub::GenerateSmiCode(
// save the left operand.
__ mov(edi, left);
// Check for 0 divisor.
- __ test(right, right);
+ __ test(right, Operand(right));
__ j(zero, &use_fp_on_smis);
// Sign extend left into edx:eax.
ASSERT(left.is(eax));
@@ -1281,7 +1167,7 @@ void BinaryOpStub::GenerateSmiCode(
// Check for negative zero result. Use combined = left | right.
__ NegativeZeroTest(eax, combined, &use_fp_on_smis);
// Check that the remainder is zero.
- __ test(edx, edx);
+ __ test(edx, Operand(edx));
__ j(not_zero, &use_fp_on_smis);
// Tag the result and store it in register eax.
__ SmiTag(eax);
@@ -1289,7 +1175,7 @@ void BinaryOpStub::GenerateSmiCode(
case Token::MOD:
// Check for 0 divisor.
- __ test(right, right);
+ __ test(right, Operand(right));
__ j(zero, &not_smis);
// Sign extend left into edx:eax.
@@ -1340,11 +1226,11 @@ void BinaryOpStub::GenerateSmiCode(
break;
case Token::ADD:
// Revert right = right + left.
- __ sub(right, left);
+ __ sub(right, Operand(left));
break;
case Token::SUB:
// Revert left = left - right.
- __ add(left, right);
+ __ add(left, Operand(right));
break;
case Token::MUL:
// Right was clobbered but a copy is in ebx.
@@ -1382,7 +1268,7 @@ void BinaryOpStub::GenerateSmiCode(
ASSERT_EQ(Token::SHL, op_);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, left);
+ __ cvtsi2sd(xmm0, Operand(left));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), left);
@@ -1404,11 +1290,11 @@ void BinaryOpStub::GenerateSmiCode(
switch (op_) {
case Token::ADD:
// Revert right = right + left.
- __ sub(right, left);
+ __ sub(right, Operand(left));
break;
case Token::SUB:
// Revert left = left - right.
- __ add(left, right);
+ __ add(left, Operand(right));
break;
case Token::MUL:
// Right was clobbered but a copy is in ebx.
@@ -1600,7 +1486,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Check result type if it is currently Int32.
if (result_type_ <= BinaryOpIC::INT32) {
__ cvttsd2si(ecx, Operand(xmm0));
- __ cvtsi2sd(xmm2, ecx);
+ __ cvtsi2sd(xmm2, Operand(ecx));
__ ucomisd(xmm0, xmm2);
__ j(not_zero, &not_int32);
__ j(carry, &not_int32);
@@ -1662,9 +1548,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
&not_int32);
switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
+ case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
+ case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+ case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
case Token::SAR: __ sar_cl(eax); break;
case Token::SHL: __ shl_cl(eax); break;
case Token::SHR: __ shr_cl(eax); break;
@@ -1688,7 +1574,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
if (op_ != Token::SHR) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
+ __ mov(ebx, Operand(eax)); // ebx: result
Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
@@ -1708,7 +1594,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
+ __ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1789,7 +1675,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
__ cmp(edx, factory->undefined_value());
__ j(not_equal, &check, Label::kNear);
if (Token::IsBitOp(op_)) {
- __ xor_(edx, edx);
+ __ xor_(edx, Operand(edx));
} else {
__ mov(edx, Immediate(factory->nan_value()));
}
@@ -1798,7 +1684,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
__ cmp(eax, factory->undefined_value());
__ j(not_equal, &done, Label::kNear);
if (Token::IsBitOp(op_)) {
- __ xor_(eax, eax);
+ __ xor_(eax, Operand(eax));
} else {
__ mov(eax, Immediate(factory->nan_value()));
}
@@ -1876,9 +1762,9 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
use_sse3_,
&not_floats);
switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
+ case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
+ case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+ case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
case Token::SAR: __ sar_cl(eax); break;
case Token::SHL: __ shl_cl(eax); break;
case Token::SHR: __ shr_cl(eax); break;
@@ -1902,7 +1788,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
if (op_ != Token::SHR) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
+ __ mov(ebx, Operand(eax)); // ebx: result
Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
@@ -1922,7 +1808,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
+ __ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2075,9 +1961,9 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
use_sse3_,
&call_runtime);
switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
+ case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
+ case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+ case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
case Token::SAR: __ sar_cl(eax); break;
case Token::SHL: __ shl_cl(eax); break;
case Token::SHR: __ shr_cl(eax); break;
@@ -2101,7 +1987,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
if (op_ != Token::SHR) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
+ __ mov(ebx, Operand(eax)); // ebx: result
Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
@@ -2121,7 +2007,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
+ __ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2231,10 +2117,10 @@ void BinaryOpStub::GenerateHeapResultAllocation(
__ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
// Now edx can be overwritten losing one of the arguments as we are
// now done and will not need it any more.
- __ mov(edx, ebx);
+ __ mov(edx, Operand(ebx));
__ bind(&skip_allocation);
// Use object in edx as a result holder
- __ mov(eax, edx);
+ __ mov(eax, Operand(edx));
break;
}
case OVERWRITE_RIGHT:
@@ -2292,7 +2178,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Then load the low and high words of the double into ebx, edx.
STATIC_ASSERT(kSmiTagSize == 1);
__ sar(eax, 1);
- __ sub(esp, Immediate(2 * kPointerSize));
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
__ mov(Operand(esp, 0), eax);
__ fild_s(Operand(esp, 0));
__ fst_d(Operand(esp, 0));
@@ -2303,7 +2189,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Check if input is a HeapNumber.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, Immediate(factory->heap_number_map()));
+ __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
__ j(not_equal, &runtime_call);
// Input is a HeapNumber. Push it on the FPU stack and load its
// low and high words into ebx, edx.
@@ -2315,12 +2201,12 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
} else { // UNTAGGED.
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope sse4_scope(SSE4_1);
- __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
+ __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
} else {
__ pshufd(xmm0, xmm1, 0x1);
- __ movd(edx, xmm0);
+ __ movd(Operand(edx), xmm0);
}
- __ movd(ebx, xmm1);
+ __ movd(Operand(ebx), xmm1);
}
// ST[0] or xmm1 == double value
@@ -2329,15 +2215,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ mov(ecx, ebx);
- __ xor_(ecx, edx);
+ __ xor_(ecx, Operand(edx));
__ mov(eax, ecx);
__ sar(eax, 16);
- __ xor_(ecx, eax);
+ __ xor_(ecx, Operand(eax));
__ mov(eax, ecx);
__ sar(eax, 8);
- __ xor_(ecx, eax);
+ __ xor_(ecx, Operand(eax));
ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ and_(ecx,
+ __ and_(Operand(ecx),
Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
// ST[0] or xmm1 == double value.
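
The hash in the comment above transcribes directly into C++; a sketch of the cache-index computation (cache size assumed to be a power of two, as the ASSERT requires, with the shifts kept arithmetic to match the sar instructions):

#include <cstdint>

// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h &= cacheSize - 1;
static uint32_t TranscendentalCacheIndex(uint32_t low, uint32_t high,
                                         uint32_t cache_size) {
  int32_t h = static_cast<int32_t>(low ^ high);
  h ^= h >> 16;  // arithmetic shift, like sar
  h ^= h >> 8;
  return static_cast<uint32_t>(h) & (cache_size - 1);
}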
@@ -2352,7 +2238,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(eax, cache_array_index));
// Eax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
__ j(zero, &runtime_call_clear_stack);
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
@@ -2395,10 +2281,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
} else { // UNTAGGED.
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ sub(esp, Immediate(kDoubleSize));
+ __ sub(Operand(esp), Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
+ __ add(Operand(esp), Immediate(kDoubleSize));
}
GenerateOperation(masm);
__ mov(Operand(ecx, 0), ebx);
@@ -2413,21 +2299,20 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
- __ sub(esp, Immediate(kDoubleSize));
+ __ sub(Operand(esp), Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm);
__ fstp_d(Operand(esp, 0));
__ movdbl(xmm1, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
+ __ add(Operand(esp), Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Allocate an unused object bigger than a HeapNumber.
- __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
+ __ EnterInternalFrame();
+ // Allocate an unused object bigger than a HeapNumber.
+ __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
__ Ret();
}
@@ -2444,11 +2329,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(eax);
- __ CallRuntime(RuntimeFunction(), 1);
- }
+ __ EnterInternalFrame();
+ __ push(eax);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -2480,13 +2364,13 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// If argument is outside the range -2^63..2^63, fsin/cos doesn't
// work. We must reduce it to the appropriate range.
__ mov(edi, edx);
- __ and_(edi, Immediate(0x7ff00000)); // Exponent only.
+ __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
int supported_exponent_limit =
(63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
- __ cmp(edi, Immediate(supported_exponent_limit));
+ __ cmp(Operand(edi), Immediate(supported_exponent_limit));
__ j(below, &in_range, Label::kNear);
// Check for infinity and NaN. Both return NaN for sin.
- __ cmp(edi, Immediate(0x7ff00000));
+ __ cmp(Operand(edi), Immediate(0x7ff00000));
Label non_nan_result;
__ j(not_equal, &non_nan_result, Label::kNear);
// Input is +/-Infinity or NaN. Result is NaN.
@@ -2495,7 +2379,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ push(Immediate(0x7ff80000));
__ push(Immediate(0));
__ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(2 * kPointerSize));
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
__ jmp(&done, Label::kNear);
__ bind(&non_nan_result);
@@ -2511,7 +2395,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ fwait();
__ fnstsw_ax();
// Clear if Illegal Operand or Zero Division exceptions are set.
- __ test(eax, Immediate(5));
+ __ test(Operand(eax), Immediate(5));
__ j(zero, &no_exceptions, Label::kNear);
__ fnclex();
__ bind(&no_exceptions);
@@ -2524,7 +2408,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ fprem1();
__ fwait();
__ fnstsw_ax();
- __ test(eax, Immediate(0x400 /* C2 */));
+ __ test(Operand(eax), Immediate(0x400 /* C2 */));
  // If C2 is set, the computation only has a partial result. Loop to
// continue computation.
__ j(not_zero, &partial_remainder_loop);
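
Functionally, the fprem1 loop above performs an IEEE remainder; a hedged sketch of what it computes, on the assumption that the divisor sitting on the FPU stack is 2*pi (the usual sin/cos argument reduction):

#include <cmath>

// One library call captures the result; the stub loops only because fprem1
// may report a partial result (status-word bit C2 set).
static double ReduceForSinCos(double x) {
  const double kTwoPi = 6.283185307179586;
  return std::remainder(x, kTwoPi);  // lands roughly in [-pi, pi]
}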
@@ -2657,13 +2541,13 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
+ __ cvtsi2sd(xmm0, Operand(edx));
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
+ __ cvtsi2sd(xmm1, Operand(eax));
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ bind(&done);
@@ -2687,12 +2571,12 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
+ __ cvtsi2sd(xmm0, Operand(edx));
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
+ __ cvtsi2sd(xmm1, Operand(eax));
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done, Label::kNear);
__ bind(&load_float_eax);
@@ -2708,11 +2592,11 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
__ mov(scratch, left);
ASSERT(!scratch.is(right)); // We're about to clobber scratch.
__ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, scratch);
+ __ cvtsi2sd(xmm0, Operand(scratch));
__ mov(scratch, right);
__ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, scratch);
+ __ cvtsi2sd(xmm1, Operand(scratch));
}
@@ -2720,12 +2604,12 @@ void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch) {
__ cvttsd2si(scratch, Operand(xmm0));
- __ cvtsi2sd(xmm2, scratch);
+ __ cvtsi2sd(xmm2, Operand(scratch));
__ ucomisd(xmm0, xmm2);
__ j(not_zero, non_int32);
__ j(carry, non_int32);
__ cvttsd2si(scratch, Operand(xmm1));
- __ cvtsi2sd(xmm2, scratch);
+ __ cvtsi2sd(xmm2, Operand(scratch));
__ ucomisd(xmm1, xmm2);
__ j(not_zero, non_int32);
__ j(carry, non_int32);
@@ -2833,7 +2717,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Save 1 in xmm3 - we need this several times later on.
__ mov(ecx, Immediate(1));
- __ cvtsi2sd(xmm3, ecx);
+ __ cvtsi2sd(xmm3, Operand(ecx));
Label exponent_nonsmi;
Label base_nonsmi;
@@ -2844,7 +2728,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Optimized version when both exponent and base are smis.
Label powi;
__ SmiUntag(edx);
- __ cvtsi2sd(xmm0, edx);
+ __ cvtsi2sd(xmm0, Operand(edx));
__ jmp(&powi);
// exponent is smi and base is a heapnumber.
__ bind(&base_nonsmi);
@@ -2886,11 +2770,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// base has the original value of the exponent - if the exponent is
// negative return 1/result.
- __ test(edx, edx);
+ __ test(edx, Operand(edx));
__ j(positive, &allocate_return);
// Special case if xmm1 has reached infinity.
__ mov(ecx, Immediate(0x7FB00000));
- __ movd(xmm0, ecx);
+ __ movd(xmm0, Operand(ecx));
__ cvtss2sd(xmm0, xmm0);
__ ucomisd(xmm0, xmm1);
__ j(equal, &call_runtime);
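
The negative-exponent handling above ("if the exponent is negative return 1/result") sits at the end of an exponentiation-by-squaring fast path; a rough sketch of that shape, not the stub's exact register allocation or special-case checks:

// Integer power by repeated squaring, reciprocal for negative exponents.
static double PowiSketch(double base, int exponent) {
  double result = 1.0;
  double b = base;
  long long n = exponent;
  if (n < 0) n = -n;
  while (n != 0) {
    if (n & 1) result *= b;
    b *= b;
    n >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;
}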
@@ -2913,7 +2797,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label handle_special_cases;
__ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
__ SmiUntag(edx);
- __ cvtsi2sd(xmm0, edx);
+ __ cvtsi2sd(xmm0, Operand(edx));
__ jmp(&handle_special_cases, Label::kNear);
__ bind(&base_not_smi);
@@ -2922,7 +2806,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &call_runtime);
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ and_(ecx, HeapNumber::kExponentMask);
- __ cmp(ecx, Immediate(HeapNumber::kExponentMask));
+ __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
// base is NaN or +/-Infinity
__ j(greater_equal, &call_runtime);
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
@@ -2933,7 +2817,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Test for -0.5.
// Load xmm2 with -0.5.
__ mov(ecx, Immediate(0xBF000000));
- __ movd(xmm2, ecx);
+ __ movd(xmm2, Operand(ecx));
__ cvtss2sd(xmm2, xmm2);
// xmm2 now has -0.5.
__ ucomisd(xmm2, xmm1);
@@ -2989,13 +2873,13 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Label adaptor;
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor, Label::kNear);
// Check index against formal parameters count limit passed in
// through register eax. Use unsigned comparison to get negative
// check for free.
- __ cmp(edx, eax);
+ __ cmp(edx, Operand(eax));
__ j(above_equal, &slow, Label::kNear);
// Read the argument from the stack and return it.
@@ -3011,7 +2895,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// comparison to get negative check for free.
__ bind(&adaptor);
__ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(edx, ecx);
+ __ cmp(edx, Operand(ecx));
__ j(above_equal, &slow, Label::kNear);
// Read the argument from the stack and return it.
@@ -3042,7 +2926,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
Label runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &runtime, Label::kNear);
// Patch the arguments.length and the parameters pointer.
@@ -3073,7 +2957,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label adaptor_frame, try_allocate;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
// No adaptor, parameter count = argument count.
@@ -3092,7 +2976,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// esp[4] = parameter count (tagged)
// esp[8] = address of receiver argument
// Compute the mapped parameter count = min(ebx, ecx) in ebx.
- __ cmp(ebx, ecx);
+ __ cmp(ebx, Operand(ecx));
__ j(less_equal, &try_allocate, Label::kNear);
__ mov(ebx, ecx);
@@ -3106,7 +2990,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
- __ test(ebx, ebx);
+ __ test(ebx, Operand(ebx));
__ j(zero, &no_parameter_map, Label::kNear);
__ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
__ bind(&no_parameter_map);
@@ -3115,7 +2999,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// 3. Arguments object.
- __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
+ __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
@@ -3130,7 +3014,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
__ mov(ebx, Operand(esp, 0 * kPointerSize));
- __ test(ebx, ebx);
+ __ test(ebx, Operand(ebx));
__ j(not_zero, &has_mapped_parameters, Label::kNear);
__ mov(edi, Operand(edi,
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
@@ -3185,7 +3069,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ test(ebx, ebx);
+ __ test(ebx, Operand(ebx));
__ j(zero, &skip_parameter_map);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
@@ -3209,7 +3093,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 2 * kPointerSize));
__ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ add(ebx, Operand(esp, 4 * kPointerSize));
- __ sub(ebx, eax);
+ __ sub(ebx, Operand(eax));
__ mov(ecx, FACTORY->the_hole_value());
__ mov(edx, edi);
__ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
@@ -3226,12 +3110,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ jmp(&parameters_test, Label::kNear);
__ bind(&parameters_loop);
- __ sub(eax, Immediate(Smi::FromInt(1)));
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
__ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
__ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
- __ add(ebx, Immediate(Smi::FromInt(1)));
+ __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
__ bind(&parameters_test);
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
__ j(not_zero, &parameters_loop, Label::kNear);
__ pop(ecx);
@@ -3251,18 +3135,18 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label arguments_loop, arguments_test;
__ mov(ebx, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 4 * kPointerSize));
- __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
- __ sub(edx, ebx);
+ __ sub(Operand(edx), ebx); // Is there a smarter way to do negative scaling?
+ __ sub(Operand(edx), ebx);
__ jmp(&arguments_test, Label::kNear);
__ bind(&arguments_loop);
- __ sub(edx, Immediate(kPointerSize));
+ __ sub(Operand(edx), Immediate(kPointerSize));
__ mov(eax, Operand(edx, 0));
__ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
- __ add(ebx, Immediate(Smi::FromInt(1)));
+ __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
__ bind(&arguments_test);
- __ cmp(ebx, ecx);
+ __ cmp(ebx, Operand(ecx));
__ j(less, &arguments_loop, Label::kNear);
// Restore.
@@ -3290,7 +3174,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
Label adaptor_frame, try_allocate, runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
// Get the length from the frame.
@@ -3309,11 +3193,11 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ test(ecx, ecx);
+ __ test(ecx, Operand(ecx));
__ j(zero, &add_arguments_object, Label::kNear);
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
+ __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -3340,7 +3224,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// If there are no actual arguments, we're done.
Label done;
- __ test(ecx, ecx);
+ __ test(ecx, Operand(ecx));
__ j(zero, &done, Label::kNear);
// Get the parameters pointer from the stack.
@@ -3362,8 +3246,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&loop);
__ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
__ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(edi, Immediate(kPointerSize));
- __ sub(edx, Immediate(kPointerSize));
+ __ add(Operand(edi), Immediate(kPointerSize));
+ __ sub(Operand(edx), Immediate(kPointerSize));
__ dec(ecx);
__ j(not_zero, &loop);
@@ -3384,6 +3268,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
// Stack frame on entry.
// esp[0]: return address
@@ -3406,7 +3294,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
__ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, ebx);
+ __ test(ebx, Operand(ebx));
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
@@ -3427,7 +3315,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
__ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
- __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
__ j(not_equal, &runtime);
// ecx: RegExp data (FixedArray)
@@ -3437,7 +3325,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  // uses the assumption that smis are 2 * their untagged value.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(edx, Immediate(2)); // edx was a smi.
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
// Check that the static offsets vector buffer is large enough.
__ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
__ j(above, &runtime);
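
The add of 2 works because of the smi encoding the comment mentions; spelled out as plain integer arithmetic (kSmiTag == 0, kSmiTagSize == 1):

// number_of_captures arrives as a smi: smi(n) == n << 1 == 2 * n.
// Registers needed: (n + 1) * 2 == 2 * n + 2 == smi(n) + 2, so adding 2 to
// the tagged value yields the untagged register count with no explicit untag.
static int CaptureRegisterCount(int number_of_captures_smi) {
  return number_of_captures_smi + 2;
}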
@@ -3459,7 +3347,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string length. A negative value will be greater (unsigned comparison).
__ mov(eax, Operand(esp, kPreviousIndexOffset));
__ JumpIfNotSmi(eax, &runtime);
- __ cmp(eax, ebx);
+ __ cmp(eax, Operand(ebx));
__ j(above_equal, &runtime);
// ecx: RegExp data (FixedArray)
@@ -3479,8 +3367,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information.
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiUntag(eax);
- __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, eax);
+ __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, Operand(eax));
__ j(greater, &runtime);
// Reset offset for possibly sliced string.
@@ -3497,7 +3385,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
// Any other flat string must be a flat ascii string.
- __ and_(ebx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ and_(Operand(ebx),
+ Immediate(kIsNotStringMask | kStringRepresentationMask));
__ j(zero, &seq_ascii_string, Label::kNear);
// Check for flat cons string or sliced string.
@@ -3509,7 +3398,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label cons_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ cmp(ebx, Immediate(kExternalStringTag));
+ __ cmp(Operand(ebx), Immediate(kExternalStringTag));
__ j(less, &cons_string);
__ j(equal, &runtime);
@@ -3615,14 +3504,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Prepare start and end index of the input.
// Load the length from the original sliced string if that is the case.
__ mov(esi, FieldOperand(esi, String::kLengthOffset));
- __ add(esi, edi); // Calculate input end wrt offset.
+ __ add(esi, Operand(edi)); // Calculate input end wrt offset.
__ SmiUntag(edi);
- __ add(ebx, edi); // Calculate input start wrt offset.
+ __ add(ebx, Operand(edi)); // Calculate input start wrt offset.
// ebx: start index of the input string
// esi: end index of the input string
Label setup_two_byte, setup_rest;
- __ test(ecx, ecx);
+ __ test(ecx, Operand(ecx));
__ j(zero, &setup_two_byte, Label::kNear);
__ SmiUntag(esi);
__ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
@@ -3642,8 +3531,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&setup_rest);
// Locate the code entry and call it.
- __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(edx);
+ __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(Operand(edx));
// Drop arguments and come back to JS mode.
__ LeaveApiExitFrame();
@@ -3664,9 +3553,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
masm->isolate());
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+ __ mov(edx,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location(
+ masm->isolate())));
__ mov(eax, Operand::StaticVariable(pending_exception));
- __ cmp(edx, eax);
+ __ cmp(edx, Operand(eax));
__ j(equal, &runtime);
// For exception, throw the exception again.
@@ -3687,7 +3578,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure to match, return null.
- __ mov(eax, factory->null_value());
+ __ mov(Operand(eax), factory->null_value());
__ ret(4 * kPointerSize);
// Load RegExp data.
@@ -3698,7 +3589,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Calculate number of capture registers (number_of_captures + 1) * 2.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(edx, Immediate(2)); // edx was a smi.
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
// edx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
@@ -3714,18 +3605,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store last subject and last input.
__ mov(eax, Operand(esp, kSubjectOffset));
__ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ RecordWriteField(ebx,
- RegExpImpl::kLastSubjectOffset,
- eax,
- edi,
- kDontSaveFPRegs);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
__ mov(eax, Operand(esp, kSubjectOffset));
__ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ RecordWriteField(ebx,
- RegExpImpl::kLastInputOffset,
- eax,
- edi,
- kDontSaveFPRegs);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -3739,7 +3624,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
__ bind(&next_capture);
- __ sub(edx, Immediate(1));
+ __ sub(Operand(edx), Immediate(1));
__ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer.
__ mov(edi, Operand(ecx, edx, times_int_size, 0));
@@ -3770,7 +3655,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
Label done;
__ mov(ebx, Operand(esp, kPointerSize * 3));
__ JumpIfNotSmi(ebx, &slowcase);
- __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
__ j(above, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
STATIC_ASSERT(kSmiTag == 0);
@@ -3830,10 +3715,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// ebx: Start of elements in FixedArray.
// edx: the hole.
Label loop;
- __ test(ecx, ecx);
+ __ test(ecx, Operand(ecx));
__ bind(&loop);
__ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
- __ sub(ecx, Immediate(1));
+ __ sub(Operand(ecx), Immediate(1));
__ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
__ jmp(&loop);
@@ -3867,7 +3752,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
__ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(mask, Immediate(1)); // Make mask.
+ __ sub(Operand(mask), Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
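
The shift-and-subtract above turns the cache length into an index mask; a small sketch of the arithmetic (length field assumed to be a smi, entry count a power of two, which is what makes the mask valid):

// Two slots per entry (number, string), so entries = length / 2; untagging a
// smi is another halving, hence one shift by kSmiTagSize + 1.
static int NumberStringCacheIndex(int length_smi, int hash) {
  const int kSmiTagSize = 1;
  int entries = length_smi >> (kSmiTagSize + 1);
  int mask = entries - 1;  // power of two assumed
  return hash & mask;
}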
@@ -3893,7 +3778,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
__ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
// Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
+ __ and_(scratch, Operand(mask));
Register index = scratch;
Register probe = mask;
__ mov(probe,
@@ -3919,7 +3804,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ bind(&smi_hash_calculated);
// Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
+ __ and_(scratch, Operand(mask));
Register index = scratch;
// Check if the entry is the smi we are looking for.
__ cmp(object,
@@ -3971,10 +3856,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Compare two smis if required.
if (include_smi_compare_) {
Label non_smi, smi_done;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
__ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, eax); // Return on the result of the subtraction.
+ __ sub(edx, Operand(eax)); // Return on the result of the subtraction.
__ j(no_overflow, &smi_done, Label::kNear);
__ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
__ bind(&smi_done);
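
The subtract-then-fix-sign sequence above is the usual smi comparison; a sketch of the same idea in C++, using a compiler builtin to expose the overflow flag (GCC/Clang):

#include <cstdint>

// On overflow the difference has the wrong sign but cannot be zero, so
// flipping all bits (the not_ above) restores the correct, non-zero sign.
static int32_t CompareSmisBySubtraction(int32_t a, int32_t b) {
  int32_t d;
  if (__builtin_sub_overflow(a, b, &d)) d = ~d;
  return d;  // negative, zero, or positive, like the stub's result
}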
@@ -3982,8 +3867,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ ret(0);
__ bind(&non_smi);
} else if (FLAG_debug_code) {
- __ mov(ecx, edx);
- __ or_(ecx, eax);
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
__ test(ecx, Immediate(kSmiTagMask));
__ Assert(not_zero, "Unexpected smi operands.");
}
@@ -3995,7 +3880,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// for NaN and undefined.
{
Label not_identical;
- __ cmp(eax, edx);
+ __ cmp(eax, Operand(edx));
__ j(not_equal, &not_identical);
if (cc_ != equal) {
@@ -4044,7 +3929,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ Set(eax, Immediate(0));
// Shift value and mask so kQuietNaNHighBitsMask applies to topmost
// bits.
- __ add(edx, edx);
+ __ add(edx, Operand(edx));
__ cmp(edx, kQuietNaNHighBitsMask << 1);
if (cc_ == equal) {
STATIC_ASSERT(EQUAL != 1);
@@ -4078,19 +3963,19 @@ void CompareStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
__ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, eax);
- __ test(ecx, edx);
+ __ and_(ecx, Operand(eax));
+ __ test(ecx, Operand(edx));
__ j(not_zero, &not_smis, Label::kNear);
// One operand is a smi.
// Check whether the non-smi is a heap number.
STATIC_ASSERT(kSmiTagMask == 1);
// ecx still holds eax & kSmiTag, which is either zero or one.
- __ sub(ecx, Immediate(0x01));
+ __ sub(Operand(ecx), Immediate(0x01));
__ mov(ebx, edx);
- __ xor_(ebx, eax);
- __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, eax);
+ __ xor_(ebx, Operand(eax));
+ __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, Operand(eax));
// if eax was smi, ebx is now edx, else eax.
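// A hedged restatement of the branchless select above in C-like form
// (register names as in the generated code):
//   ecx = (eax & kSmiTagMask) - 1;     // all ones if eax is a smi, else 0
//   ebx = eax ^ ((eax ^ edx) & ecx);   // == edx if eax is a smi, else eax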
// Check if the non-smi operand is a heap number.
@@ -4152,9 +4037,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Return a result of -1, 0, or 1, based on EFLAGS.
__ mov(eax, 0); // equal
__ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
+ __ cmov(above, eax, Operand(ecx));
__ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
+ __ cmov(below, eax, Operand(ecx));
__ ret(0);
} else {
FloatingPointHelper::CheckFloatOperands(
@@ -4313,49 +4198,25 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
-void CallFunctionStub::FinishCode(Code* code) {
- code->set_has_function_cache(RecordCallTarget());
-}
-
-
-void CallFunctionStub::Clear(Heap* heap, Address address) {
- ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
- // 1 ~ size of the test eax opcode.
- Object* cell = Memory::Object_at(address + kPointerSize + 1);
- // Low-level because clearing happens during GC.
- reinterpret_cast<JSGlobalPropertyCell*>(cell)->set_value(
- RawUninitializedSentinel(heap));
-}
-
-
-Object* CallFunctionStub::GetCachedValue(Address address) {
- ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
- // 1 ~ size of the test eax opcode.
- Object* cell = Memory::Object_at(address + kPointerSize + 1);
- return JSGlobalPropertyCell::cast(cell)->value();
-}
-
-
void CallFunctionStub::Generate(MacroAssembler* masm) {
- Isolate* isolate = masm->isolate();
Label slow, non_function;
// The receiver might implicitly be the global object. This is
// indicated by passing the hole as the receiver to the call
// function stub.
if (ReceiverMightBeImplicit()) {
- Label receiver_ok;
+ Label call;
// Get the receiver from the stack.
// +1 ~ return address
__ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
// Call as function is indicated with the hole.
- __ cmp(eax, isolate->factory()->the_hole_value());
- __ j(not_equal, &receiver_ok, Label::kNear);
+ __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, &call, Label::kNear);
// Patch the receiver on the stack with the global receiver object.
__ mov(ebx, GlobalObjectOperand());
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
- __ bind(&receiver_ok);
+ __ bind(&call);
}
// Get the function to call from the stack.
@@ -4368,53 +4229,12 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &slow);
- if (RecordCallTarget()) {
- // Cache the called function in a global property cell in the
- // instruction stream after the call. Cache states are uninitialized,
- // monomorphic (indicated by a JSFunction), and megamorphic.
- Label initialize, call;
- // Load the cache cell address into ebx and the cache state into ecx.
- __ mov(ebx, Operand(esp, 0)); // Return address.
- __ mov(ebx, Operand(ebx, 1)); // 1 ~ sizeof 'test eax' opcode in bytes.
- __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(ecx, edi);
- __ j(equal, &call, Label::kNear);
- __ cmp(ecx, Immediate(MegamorphicSentinel(isolate)));
- __ j(equal, &call, Label::kNear);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ cmp(ecx, Immediate(UninitializedSentinel(isolate)));
- __ j(equal, &initialize, Label::kNear);
- // MegamorphicSentinel is a root so no write-barrier is needed.
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(MegamorphicSentinel(isolate)));
- __ jmp(&call, Label::kNear);
-
- // An uninitialized cache is patched with the function.
- __ bind(&initialize);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
- __ mov(ecx, edi);
- __ RecordWriteField(ebx,
- JSGlobalPropertyCell::kValueOffset,
- ecx,
- edx,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, // Cells are rescanned.
- OMIT_SMI_CHECK);
-
- __ bind(&call);
- }
-
// Fast-case: Just invoke the function.
ParameterCount actual(argc_);
if (ReceiverMightBeImplicit()) {
Label call_as_function;
- __ cmp(eax, isolate->factory()->the_hole_value());
+ __ cmp(eax, masm->isolate()->factory()->the_hole_value());
__ j(equal, &call_as_function);
__ InvokeFunction(edi,
actual,
@@ -4431,14 +4251,6 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Slow-case: Non-function called.
__ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case.
- __ mov(ebx, Operand(esp, 0));
- __ mov(ebx, Operand(ebx, 1));
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(MegamorphicSentinel(isolate)));
- }
// Check for function proxy.
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function);
@@ -4450,7 +4262,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ SetCallKind(ecx, CALL_AS_FUNCTION);
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
{
- Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
@@ -4462,7 +4275,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ Set(ebx, Immediate(0));
__ SetCallKind(ecx, CALL_AS_METHOD);
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
@@ -4472,35 +4286,6 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
- // It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
-}
-
-
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
-}
-
-
-void CEntryStub::GenerateAheadOfTime() {
- CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
- code->set_is_pregenerated(true);
-}
-
-
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(eax);
}
@@ -4547,7 +4332,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
__ mov(Operand(esp, 2 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
- __ call(ebx);
+ __ call(Operand(ebx));
// Result is in eax or edx:eax - do not destroy these registers!
if (always_allocate_scope) {
@@ -4579,7 +4364,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// should have returned some failure value.
if (FLAG_debug_code) {
__ push(edx);
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+ __ mov(edx, Operand::StaticVariable(
+ ExternalReference::the_hole_value_location(masm->isolate())));
Label okay;
__ cmp(edx, Operand::StaticVariable(pending_exception_address));
// Cannot use check here as it attempts to generate call into runtime.
@@ -4590,7 +4376,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
+ __ LeaveExitFrame(save_doubles_);
__ ret(0);
// Handling of failure.
@@ -4607,8 +4393,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(equal, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
+ ExternalReference the_hole_location =
+ ExternalReference::the_hole_value_location(masm->isolate());
__ mov(eax, Operand::StaticVariable(pending_exception_address));
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+ __ mov(edx, Operand::StaticVariable(the_hole_location));
__ mov(Operand::StaticVariable(pending_exception_address), edx);
// Special handling of termination exceptions which are uncatchable
@@ -4643,7 +4431,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// a garbage collection and retrying the builtin (twice).
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
+ __ EnterExitFrame(save_doubles_);
// eax: result parameter for PerformGC, if any (setup below)
// ebx: pointer to builtin function (C callee-saved)
@@ -4699,7 +4487,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Setup frame.
__ push(ebp);
- __ mov(ebp, esp);
+ __ mov(ebp, Operand(esp));
// Push marker in two places.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
@@ -4743,7 +4531,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
// Clear any pending exceptions.
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+ ExternalReference the_hole_location =
+ ExternalReference::the_hole_value_location(masm->isolate());
+ __ mov(edx, Operand::StaticVariable(the_hole_location));
__ mov(Operand::StaticVariable(pending_exception), edx);
// Fake a receiver (NULL).
@@ -4765,7 +4555,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
__ mov(edx, Operand(edx, 0)); // deref address
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(edx);
+ __ call(Operand(edx));
// Unlink this frame from the handler chain.
__ PopTryHandler();
@@ -4773,7 +4563,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
__ pop(ebx);
- __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ cmp(Operand(ebx),
+ Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ j(not_equal, &not_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(&not_outermost_js_2);
@@ -4787,7 +4578,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ pop(ebx);
__ pop(esi);
__ pop(edi);
- __ add(esp, Immediate(2 * kPointerSize)); // remove markers
+ __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return.
__ pop(ebp);
@@ -4903,10 +4694,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
Label loop, is_instance, is_not_instance;
__ bind(&loop);
- __ cmp(scratch, prototype);
+ __ cmp(scratch, Operand(prototype));
__ j(equal, &is_instance, Label::kNear);
Factory* factory = masm->isolate()->factory();
- __ cmp(scratch, Immediate(factory->null_value()));
+ __ cmp(Operand(scratch), Immediate(factory->null_value()));
__ j(equal, &is_not_instance, Label::kNear);
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
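// A hedged pseudo-C sketch of the walk above (the branch back to &loop
// presumably follows just after this hunk):
//   for (Object* p = map->prototype(); ; p = p->map()->prototype()) {
//     if (p == prototype)             goto is_instance;
//     if (p == factory->null_value()) goto is_not_instance;
//   }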
@@ -4997,14 +4788,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
// Call the builtin and convert 0/1 to true/false.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(object);
- __ push(function);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
+ __ EnterInternalFrame();
+ __ push(object);
+ __ push(function);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ __ LeaveInternalFrame();
Label true_value, done;
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
__ j(zero, &true_value, Label::kNear);
__ mov(eax, factory->false_value());
__ jmp(&done, Label::kNear);
@@ -5115,24 +4905,22 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Immediate(masm->isolate()->factory()->empty_string()));
__ j(not_equal, &call_runtime_);
// Get the first of the two strings and load its instance type.
- __ mov(result_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string, Label::kNear);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
__ add(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
- __ mov(result_, FieldOperand(object_, SlicedString::kParentOffset));
+ __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));
// Assure that we are dealing with a sequential string. Go to runtime if not.
__ bind(&assure_seq_string);
- __ mov(result_, FieldOperand(result_, HeapObject::kMapOffset));
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ test(result_, Immediate(kStringRepresentationMask));
__ j(not_zero, &call_runtime_);
- // Actually fetch the parent string if it is confirmed to be sequential.
- STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset);
- __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));
+ __ jmp(&flat_string, Label::kNear);
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
@@ -5322,7 +5110,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label second_not_zero_length, both_not_zero_length;
__ mov(ecx, FieldOperand(edx, String::kLengthOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ test(ecx, ecx);
+ __ test(ecx, Operand(ecx));
__ j(not_zero, &second_not_zero_length, Label::kNear);
// Second string is empty, result is first string which is already in eax.
Counters* counters = masm->isolate()->counters();
@@ -5331,7 +5119,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&second_not_zero_length);
__ mov(ebx, FieldOperand(eax, String::kLengthOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ test(ebx, ebx);
+ __ test(ebx, Operand(ebx));
__ j(not_zero, &both_not_zero_length, Label::kNear);
// First string is empty, result is second string which is in edx.
__ mov(eax, edx);
@@ -5346,13 +5134,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Look at the length of the result of adding the two strings.
Label string_add_flat_result, longer_than_two;
__ bind(&both_not_zero_length);
- __ add(ebx, ecx);
+ __ add(ebx, Operand(ecx));
STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
// Handle exceptionally long strings in the runtime system.
__ j(overflow, &string_add_runtime);
// Use the symbol table when adding two one character strings, as it
// helps later optimizations to return a symbol here.
- __ cmp(ebx, Immediate(Smi::FromInt(2)));
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
__ j(not_equal, &longer_than_two);
// Check that both strings are non-external ascii strings.
@@ -5389,7 +5177,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&string_add_runtime);
// Pack both characters in ebx.
__ shl(ecx, kBitsPerByte);
- __ or_(ebx, ecx);
+ __ or_(ebx, Operand(ecx));
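// A one-line restatement of the packing above:
//   ebx = char1 | (char2 << kBitsPerByte);   // char1 in byte 0, char2 in byte 1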
// Set the characters in the new string.
__ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
__ IncrementCounter(counters->string_add_native(), 1);
@@ -5397,7 +5185,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
__ j(below, &string_add_flat_result);
// If result is not supposed to be flat allocate a cons string object. If both
@@ -5407,7 +5195,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ and_(ecx, edi);
+ __ and_(ecx, Operand(edi));
STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(ecx, Immediate(kStringEncodingMask));
@@ -5435,7 +5223,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &ascii_data);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, ecx);
+ __ xor_(edi, Operand(ecx));
STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
__ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
__ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
@@ -5483,12 +5271,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load first argument and locate first character.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: first character of result
// edx: first char of first argument
@@ -5498,7 +5286,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: next character of result
// edx: first char of second argument
@@ -5522,13 +5310,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(ecx,
+ __ add(Operand(ecx),
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load first argument and locate first character.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx,
+ __ add(Operand(edx),
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: first character of result
@@ -5539,7 +5327,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: next character of result
// edx: first char of second argument
@@ -5615,15 +5403,15 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
if (ascii) {
__ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch);
- __ add(src, Immediate(1));
- __ add(dest, Immediate(1));
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
} else {
__ mov_w(scratch, Operand(src, 0));
__ mov_w(Operand(dest, 0), scratch);
- __ add(src, Immediate(2));
- __ add(dest, Immediate(2));
+ __ add(Operand(src), Immediate(2));
+ __ add(Operand(dest), Immediate(2));
}
- __ sub(count, Immediate(1));
+ __ sub(Operand(count), Immediate(1));
__ j(not_zero, &loop);
}
@@ -5646,7 +5434,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Nothing to do for zero characters.
Label done;
- __ test(count, count);
+ __ test(count, Operand(count));
__ j(zero, &done);
// Make count the number of bytes to copy.
@@ -5671,7 +5459,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Check if there are more bytes to copy.
__ bind(&last_bytes);
- __ test(count, count);
+ __ test(count, Operand(count));
__ j(zero, &done);
// Copy remaining characters.
@@ -5679,9 +5467,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&loop);
__ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch);
- __ add(src, Immediate(1));
- __ add(dest, Immediate(1));
- __ sub(count, Immediate(1));
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ __ sub(Operand(count), Immediate(1));
__ j(not_zero, &loop);
__ bind(&done);
@@ -5703,12 +5491,12 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// different hash algorithm. Don't try to look for these in the symbol table.
Label not_array_index;
__ mov(scratch, c1);
- __ sub(scratch, Immediate(static_cast<int>('0')));
- __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
__ j(above, &not_array_index, Label::kNear);
__ mov(scratch, c2);
- __ sub(scratch, Immediate(static_cast<int>('0')));
- __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
__ j(below_equal, not_probed);
__ bind(&not_array_index);
@@ -5721,7 +5509,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Collect the two characters in a register.
Register chars = c1;
__ shl(c2, kBitsPerByte);
- __ or_(chars, c2);
+ __ or_(chars, Operand(c2));
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
@@ -5738,7 +5526,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register mask = scratch2;
__ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
__ SmiUntag(mask);
- __ sub(mask, Immediate(1));
+ __ sub(Operand(mask), Immediate(1));
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
@@ -5755,9 +5543,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Calculate entry in symbol table.
__ mov(scratch, hash);
if (i > 0) {
- __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
+ __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
}
- __ and_(scratch, mask);
+ __ and_(scratch, Operand(mask));
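// A hedged sketch of the probe computed above, assuming the usual
// open-addressing scheme where GetProbeOffset(i) supplies an increasing step
// and the table capacity is a power of two:
//   entry = (hash + GetProbeOffset(i)) & mask;   // mask = capacity - 1
// The candidate symbol is then loaded from the table at that entry.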
// Load the entry from the symbol table.
Register candidate = scratch; // Scratch register contains candidate.
@@ -5794,7 +5582,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Check if the two characters match.
__ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
__ and_(temp, 0x0000ffff);
- __ cmp(chars, temp);
+ __ cmp(chars, Operand(temp));
__ j(equal, &found_in_symbol_table);
__ bind(&next_probe_pop_mask[i]);
__ pop(mask);
@@ -5821,11 +5609,11 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
// hash = character + (character << 10);
__ mov(hash, character);
__ shl(hash, 10);
- __ add(hash, character);
+ __ add(hash, Operand(character));
// hash ^= hash >> 6;
__ mov(scratch, hash);
__ sar(scratch, 6);
- __ xor_(hash, scratch);
+ __ xor_(hash, Operand(scratch));
}
@@ -5834,15 +5622,15 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
Register character,
Register scratch) {
// hash += character;
- __ add(hash, character);
+ __ add(hash, Operand(character));
// hash += hash << 10;
__ mov(scratch, hash);
__ shl(scratch, 10);
- __ add(hash, scratch);
+ __ add(hash, Operand(scratch));
// hash ^= hash >> 6;
__ mov(scratch, hash);
__ sar(scratch, 6);
- __ xor_(hash, scratch);
+ __ xor_(hash, Operand(scratch));
}
@@ -5852,19 +5640,19 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// hash += hash << 3;
__ mov(scratch, hash);
__ shl(scratch, 3);
- __ add(hash, scratch);
+ __ add(hash, Operand(scratch));
// hash ^= hash >> 11;
__ mov(scratch, hash);
__ sar(scratch, 11);
- __ xor_(hash, scratch);
+ __ xor_(hash, Operand(scratch));
// hash += hash << 15;
__ mov(scratch, hash);
__ shl(scratch, 15);
- __ add(hash, scratch);
+ __ add(hash, Operand(scratch));
// if (hash == 0) hash = 27;
Label hash_not_zero;
- __ test(hash, hash);
+ __ test(hash, Operand(hash));
__ j(not_zero, &hash_not_zero, Label::kNear);
__ mov(hash, Immediate(27));
__ bind(&hash_not_zero);
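// A hedged C sketch of the hash sequence generated by GenerateHashInit,
// GenerateHashAddCharacter and GenerateHashGetHash above (a one-at-a-time
// style hash). The function name and the unsigned arithmetic are
// illustrative; the generated code works on untagged register values.
static uint32_t HashSequentialString(const uint8_t* chars, int length) {
  uint32_t hash = 0;
  for (int i = 0; i < length; i++) {
    hash += chars[i];      // hash += character
    hash += hash << 10;    // hash += hash << 10
    hash ^= hash >> 6;     // hash ^= hash >> 6
  }
  hash += hash << 3;       // finalization, as in GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash != 0 ? hash : 27;  // if (hash == 0) hash = 27;
}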
@@ -5896,7 +5684,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(ecx, &runtime);
__ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
__ JumpIfNotSmi(edx, &runtime);
- __ sub(ecx, edx);
+ __ sub(ecx, Operand(edx));
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
Label return_eax;
__ j(equal, &return_eax);
@@ -6028,13 +5816,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
- __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
__ SmiUntag(ebx);
- __ add(esi, ebx);
+ __ add(esi, Operand(ebx));
// eax: result string
// ecx: result length
@@ -6063,17 +5851,18 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
- __ add(edi,
+ __ add(Operand(edi),
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ add(Operand(esi),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
// As 'from' is a smi it is 2 times the untagged value, which matches the
// size of a two-byte character.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(esi, ebx);
+ __ add(esi, Operand(ebx));
// eax: result string
// ecx: result length
@@ -6113,7 +5902,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
- __ test(length, length);
+ __ test(length, Operand(length));
__ j(not_zero, &compare_chars, Label::kNear);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -6148,14 +5937,14 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ j(less_equal, &left_shorter, Label::kNear);
// Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, length_delta);
+ __ sub(scratch1, Operand(length_delta));
__ bind(&left_shorter);
Register min_length = scratch1;
// If either length is zero, just compare lengths.
Label compare_lengths;
- __ test(min_length, min_length);
+ __ test(min_length, Operand(min_length));
__ j(zero, &compare_lengths, Label::kNear);
// Compare characters.
@@ -6165,7 +5954,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
- __ test(length_delta, length_delta);
+ __ test(length_delta, Operand(length_delta));
__ j(not_zero, &result_not_equal, Label::kNear);
// Result is EQUAL.
@@ -6214,7 +6003,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
__ mov_b(scratch, Operand(left, index, times_1, 0));
__ cmpb(scratch, Operand(right, index, times_1, 0));
__ j(not_equal, chars_not_equal, chars_not_equal_near);
- __ add(index, Immediate(1));
+ __ add(Operand(index), Immediate(1));
__ j(not_zero, &loop);
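// A hedged reading of the loop above: 'index' is expected to start out
// negative (minus the number of characters still to compare) with 'left' and
// 'right' pointing past that range, so the add/j(not_zero) pair walks both
// strings forward and falls through once index reaches zero.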
}
@@ -6231,7 +6020,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 1 * kPointerSize)); // right
Label not_same;
- __ cmp(edx, eax);
+ __ cmp(edx, Operand(eax));
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
@@ -6247,7 +6036,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Compare flat ascii strings.
// Drop arguments from the stack.
__ pop(ecx);
- __ add(esp, Immediate(2 * kPointerSize));
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
__ push(ecx);
GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
@@ -6261,16 +6050,16 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
Label miss;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
__ JumpIfNotSmi(ecx, &miss, Label::kNear);
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
- __ sub(eax, edx);
+ __ sub(eax, Operand(edx));
} else {
Label done;
- __ sub(edx, eax);
+ __ sub(edx, Operand(eax));
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
__ not_(edx);
@@ -6290,8 +6079,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
Label generic_stub;
Label unordered;
Label miss;
- __ mov(ecx, edx);
- __ and_(ecx, eax);
+ __ mov(ecx, Operand(edx));
+ __ and_(ecx, Operand(eax));
__ JumpIfSmi(ecx, &generic_stub, Label::kNear);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
@@ -6319,9 +6108,9 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
// Performing mov, because xor would destroy the flag register.
__ mov(eax, 0); // equal
__ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
+ __ cmov(above, eax, Operand(ecx));
__ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
+ __ cmov(below, eax, Operand(ecx));
__ ret(0);
__ bind(&unordered);
@@ -6348,9 +6137,9 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
// Check that both operands are heap objects.
Label miss;
- __ mov(tmp1, left);
+ __ mov(tmp1, Operand(left));
STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, right);
+ __ and_(tmp1, Operand(right));
__ JumpIfSmi(tmp1, &miss, Label::kNear);
// Check that both operands are symbols.
@@ -6359,13 +6148,13 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, tmp2);
+ __ and_(tmp1, Operand(tmp2));
__ test(tmp1, Immediate(kIsSymbolMask));
__ j(zero, &miss, Label::kNear);
// Symbols are compared by identity.
Label done;
- __ cmp(left, right);
+ __ cmp(left, Operand(right));
// Make sure eax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(eax));
@@ -6394,9 +6183,9 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
Register tmp3 = edi;
// Check that both operands are heap objects.
- __ mov(tmp1, left);
+ __ mov(tmp1, Operand(left));
STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, right);
+ __ and_(tmp1, Operand(right));
__ JumpIfSmi(tmp1, &miss);
// Check that both operands are strings. This leaves the instance
@@ -6407,13 +6196,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ mov(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
+ __ or_(tmp3, Operand(tmp2));
__ test(tmp3, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
// Fast check for identical strings.
Label not_same;
- __ cmp(left, right);
+ __ cmp(left, Operand(right));
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
@@ -6427,7 +6216,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// because we already know they are not identical.
Label do_compare;
STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, tmp2);
+ __ and_(tmp1, Operand(tmp2));
__ test(tmp1, Immediate(kIsSymbolMask));
__ j(zero, &do_compare, Label::kNear);
// Make sure eax is non-zero. At this point input operands are
@@ -6460,8 +6249,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
- __ mov(ecx, edx);
- __ and_(ecx, eax);
+ __ mov(ecx, Operand(edx));
+ __ and_(ecx, Operand(eax));
__ JumpIfSmi(ecx, &miss, Label::kNear);
__ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
@@ -6470,7 +6259,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
- __ sub(eax, edx);
+ __ sub(eax, Operand(edx));
__ ret(0);
__ bind(&miss);
@@ -6485,16 +6274,15 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(eax);
__ push(ecx);
- {
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
- masm->isolate());
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(op_)));
- __ CallExternalReference(miss, 3);
- }
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+ masm->isolate());
+ __ EnterInternalFrame();
+ __ push(edx);
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ CallExternalReference(miss, 3);
+ __ LeaveInternalFrame();
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
@@ -6506,7 +6294,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(ecx);
// Do a tail call to the rewritten stub.
- __ jmp(edi);
+ __ jmp(Operand(edi));
}
@@ -6535,8 +6323,8 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
// Capacity is smi 2^n.
__ mov(index, FieldOperand(properties, kCapacityOffset));
__ dec(index);
- __ and_(index,
- Immediate(Smi::FromInt(name->Hash() +
+ __ and_(Operand(index),
+ Immediate(Smi::FromInt(name->Hash() +
StringDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
@@ -6569,7 +6357,7 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
__ push(Immediate(name->Hash()));
MaybeObject* result = masm->TryCallStub(&stub);
if (result->IsFailure()) return result;
- __ test(r0, r0);
+ __ test(r0, Operand(r0));
__ j(not_zero, miss);
__ jmp(done);
return result;
@@ -6602,9 +6390,9 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ mov(r0, FieldOperand(name, String::kHashFieldOffset));
__ shr(r0, String::kHashShift);
if (i > 0) {
- __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
+ __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
}
- __ and_(r0, r1);
+ __ and_(r0, Operand(r1));
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
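// A hedged sketch of the probe above, assuming the capacity held in r1 is a
// power of two so the mask wraps the probe:
//   index = (name_hash + GetProbeOffset(i)) & (capacity - 1);
// With kEntrySize == 3 the index is then typically scaled as
// index + (index << 1) before the element is loaded.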
@@ -6628,15 +6416,13 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ push(r0);
__ CallStub(&stub);
- __ test(r1, r1);
+ __ test(r1, Operand(r1));
__ j(zero, miss);
__ jmp(done);
}
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
// Stack frame on entry:
// esp[0 * kPointerSize]: return address.
// esp[1 * kPointerSize]: key's hash.
@@ -6667,7 +6453,8 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Compute the masked index: (hash + i + i * i) & mask.
__ mov(scratch, Operand(esp, 2 * kPointerSize));
if (i > 0) {
- __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
+ __ add(Operand(scratch),
+ Immediate(StringDictionary::GetProbeOffset(i)));
}
__ and_(scratch, Operand(esp, 0));
@@ -6723,275 +6510,6 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { ebx, eax, edi, EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- { ebx, ecx, edx, EMIT_REMEMBERED_SET },
- { ebx, edi, edx, OMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal and CallFunctionStub.
- { ebx, ecx, edx, OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField and
- // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { edx, ecx, ebx, EMIT_REMEMBERED_SET },
- // GenerateStoreField calls the stub with two different permutations of
- // registers. This is the second.
- { ebx, ecx, edx, EMIT_REMEMBERED_SET },
- // StoreIC::GenerateNormal via GenerateDictionaryStore
- { ebx, edi, edx, EMIT_REMEMBERED_SET },
- // KeyedStoreIC::GenerateGeneric.
- { ebx, edx, ecx, EMIT_REMEMBERED_SET},
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { edi, edx, ecx, EMIT_REMEMBERED_SET},
- // Null termination.
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
-};
-
-
-bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
-
- CpuFeatures::TryForceFeatureScope scope(SSE2);
- if (CpuFeatures::IsSupported(SSE2)) {
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode()->set_is_pregenerated(true);
- }
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- }
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instructions (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
- __ jmp(&skip_to_incremental_compacting, Label::kFar);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- masm->set_byte_at(0, kTwoByteNopInstruction);
- masm->set_byte_at(2, kFiveByteNopInstruction);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- not_zero,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
- mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm,
- kReturnOnNoNeedToInformIncrementalMarker,
- mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ ret(0);
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
- } else {
- ASSERT(mode == INCREMENTAL);
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
- __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value.
- }
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label object_is_black, need_incremental, need_incremental_pop_object;
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(),
- regs_.scratch0(),
- regs_.scratch1(),
- &object_is_black,
- Label::kNear);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&object_is_black);
-
- // Get the value from the slot.
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- zero,
- &ensure_not_white,
- Label::kNear);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- not_zero,
- &ensure_not_white,
- Label::kNear);
-
- __ jmp(&need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need an extra register for this, so we push the object register
- // temporarily.
- __ push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
- __ pop(regs_.object());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 2a7d316f47..fa255da1fd 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -60,25 +60,6 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated() { return true; }
- static void GenerateFixedRegStubsAheadOfTime();
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -437,8 +418,6 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -451,7 +430,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryLookup; }
+ Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
return DictionaryBits::encode(dictionary_.code()) |
@@ -472,272 +451,6 @@ class StringDictionaryLookupStub: public CodeStub {
};
-class RecordWriteStub: public CodeStub {
- public:
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
- static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
-
- static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
- static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
-
- static Mode GetMode(Code* stub) {
- byte first_instruction = stub->instruction_start()[0];
- byte second_instruction = stub->instruction_start()[2];
-
- if (first_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL;
- }
-
- ASSERT(first_instruction == kTwoByteNopInstruction);
-
- if (second_instruction == kFiveByteJumpInstruction) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(second_instruction == kFiveByteNopInstruction);
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteNopInstruction;
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteJumpInstruction;
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteJumpInstruction;
- break;
- }
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 7);
- }
-
- private:
- // This is a helper class for freeing up 3 scratch registers, where the third
- // is always ecx (needed for shift operations). The input is two registers
- // that must be preserved and one scratch register provided by the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_orig_(object),
- address_orig_(address),
- scratch0_orig_(scratch0),
- object_(object),
- address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
- if (scratch0.is(ecx)) {
- scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
- }
- if (object.is(ecx)) {
- object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
- }
- if (address.is(ecx)) {
- address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
- }
- ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
- }
-
- void Save(MacroAssembler* masm) {
- ASSERT(!address_orig_.is(object_));
- ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
- // We don't have to save scratch0_orig_ because it was given to us as
- // a scratch register. But if we had to switch to a different reg then
- // we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
- if (!ecx.is(scratch0_orig_) &&
- !ecx.is(object_orig_) &&
- !ecx.is(address_orig_)) {
- masm->push(ecx);
- }
- masm->push(scratch1_);
- if (!address_.is(address_orig_)) {
- masm->push(address_);
- masm->mov(address_, address_orig_);
- }
- if (!object_.is(object_orig_)) {
- masm->push(object_);
- masm->mov(object_, object_orig_);
- }
- }
-
- void Restore(MacroAssembler* masm) {
- // These will have been preserved the entire time, so we just need to move
- // them back. Only in one case is the orig_ reg different from the plain
- // one, since only one of them can alias with ecx.
- if (!object_.is(object_orig_)) {
- masm->mov(object_orig_, object_);
- masm->pop(object_);
- }
- if (!address_.is(address_orig_)) {
- masm->mov(address_orig_, address_);
- masm->pop(address_);
- }
- masm->pop(scratch1_);
- if (!ecx.is(scratch0_orig_) &&
- !ecx.is(object_orig_) &&
- !ecx.is(address_orig_)) {
- masm->pop(ecx);
- }
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The caller saved
- // registers are eax, ecx and edx. The three scratch registers (incl. ecx)
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- masm->sub(esp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
- // Save all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
- }
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- // Restore all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
- }
- masm->add(esp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
- }
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_orig_;
- Register address_orig_;
- Register scratch0_orig_;
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
- // Third scratch register is always ecx.
-
- Register GetRegThatIsNotEcxOr(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(ecx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- }
-;
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
-
- bool MustBeInStubCache() {
- // All stubs must be registered in the stub cache
- // otherwise IncrementalMarker would not be able to find
- // and patch it.
- return true;
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 3> {};
- class ValueBits: public BitField<int, 3, 3> {};
- class AddressBits: public BitField<int, 6, 3> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- RegisterAllocation regs_;
-};
-
-
} } // namespace v8::internal
#endif // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index f901b6f888..3a657bd541 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -39,16 +39,12 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
+ masm->EnterInternalFrame();
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
+ masm->LeaveInternalFrame();
}
@@ -112,14 +108,14 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ mov(edx, dst);
__ and_(edx, 0xF);
__ neg(edx);
- __ add(edx, Immediate(16));
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
+ __ add(Operand(edx), Immediate(16));
+ __ add(dst, Operand(edx));
+ __ add(src, Operand(edx));
+ __ sub(Operand(count), edx);
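// A hedged restatement of the alignment step above (the first 16 bytes are
// assumed to have already been copied unaligned before this point, so
// advancing by a full 16 when dst happens to be aligned is harmless):
//   int slop = 16 - (dst & 0xF);
//   dst += slop; src += slop; count -= slop;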
// edi is now aligned. Check if esi is also aligned.
Label unaligned_source;
- __ test(src, Immediate(0x0F));
+ __ test(Operand(src), Immediate(0x0F));
__ j(not_zero, &unaligned_source);
{
// Copy loop for aligned source and destination.
@@ -134,11 +130,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ prefetch(Operand(src, 0x20), 1);
__ movdqa(xmm0, Operand(src, 0x00));
__ movdqa(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
+ __ add(Operand(src), Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
+ __ add(Operand(dst), Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
@@ -146,12 +142,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
// At most 31 bytes to copy.
Label move_less_16;
- __ test(count, Immediate(0x10));
+ __ test(Operand(count), Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqa(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
+ __ add(Operand(src), Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
+ __ add(Operand(dst), Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -180,11 +176,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ prefetch(Operand(src, 0x20), 1);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
+ __ add(Operand(src), Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
+ __ add(Operand(dst), Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
@@ -192,12 +188,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
// At most 31 bytes to copy.
Label move_less_16;
- __ test(count, Immediate(0x10));
+ __ test(Operand(count), Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqu(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
+ __ add(Operand(src), Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
+ __ add(Operand(dst), Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -232,10 +228,10 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ mov(edx, dst);
__ and_(edx, 0x03);
__ neg(edx);
- __ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
+ __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
+ __ add(dst, Operand(edx));
+ __ add(src, Operand(edx));
+ __ sub(Operand(count), edx);
// edi is now aligned, ecx holds number of remaning bytes to copy.
__ mov(edx, count);
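
Most hunks in this file, and in the ia32 files that follow, apply one mechanical substitution: the 3.6.4 assembler expects the destination of add/sub/cmp/test to be wrapped in an explicit Operand(...), whereas the newer assembler accepted a bare Register. A small sketch of that overload difference, with placeholder Register/Operand/Immediate types standing in for V8's real ones:

#include <cstdio>

// Placeholder types only; V8's real Register/Operand/Immediate/Assembler are richer.
struct Register { int code; };
struct Immediate { int value; };
struct Operand {                       // register-form operand, as in Operand(edx)
  explicit Operand(Register r) : reg(r) {}
  Register reg;
};

// Newer assembler surface (pre-downgrade): arithmetic ops accept a bare Register.
struct NewerAssembler {
  void add(Register dst, Immediate imm) {
    std::printf("add r%d, %d\n", dst.code, imm.value);
  }
};

// 3.6.4-era surface: the destination must be wrapped in an explicit Operand.
struct OlderAssembler {
  void add(Operand dst, Immediate imm) {
    std::printf("add r%d, %d\n", dst.reg.code, imm.value);
  }
};

int main() {
  Register edx = {2};
  NewerAssembler newer;
  OlderAssembler older;
  newer.add(edx, Immediate{16});            // __ add(edx, Immediate(16));
  older.add(Operand(edx), Immediate{16});   // __ add(Operand(edx), Immediate(16));
  return 0;
}
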
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index d7184ed208..2389948866 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -100,64 +100,63 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList non_object_regs,
bool convert_call_to_jmp) {
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ test(reg, Immediate(0xc0000000));
- __ Assert(zero, "Unable to encode value as smi");
- }
- __ SmiTag(reg);
- __ push(reg);
+ __ EnterInternalFrame();
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non object values
+ // are stored as a smi causing it to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ test(reg, Immediate(0xc0000000));
+ __ Assert(zero, "Unable to encode value as smi");
}
+ __ SmiTag(reg);
+ __ push(reg);
}
+ }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ Set(eax, Immediate(0)); // No arguments.
- __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values containing object pointers from the
- // expression stack.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, Immediate(kDebugZapValue));
- }
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(reg);
- __ SmiUntag(reg);
- }
+ __ Set(eax, Immediate(0)); // No arguments.
+ __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values containing object pointers from the expression
+ // stack.
+ for (int i = kNumJSCallerSaved; --i >= 0;) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if (FLAG_debug_code) {
+ __ Set(reg, Immediate(kDebugZapValue));
+ }
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ __ SmiUntag(reg);
}
-
- // Get rid of the internal frame.
}
+ // Get rid of the internal frame.
+ __ LeaveInternalFrame();
+
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ add(esp, Immediate(kPointerSize));
+ __ add(Operand(esp), Immediate(kPointerSize));
}
// Now that the break point has been handled, resume normal execution by
@@ -299,7 +298,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
- __ jmp(edx);
+ __ jmp(Operand(edx));
}
const bool Debug::kFrameDropperSupported = true;
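
The debug-break helper above trades the RAII-style FrameScope for explicit EnterInternalFrame/LeaveInternalFrame calls. A compact sketch of the two styles, using a placeholder MacroAssembler that only models the frame bookkeeping (the real FrameScope also takes a StackFrame type argument):

#include <cstdio>

// Placeholder MacroAssembler exposing only the frame bookkeeping used here.
struct MacroAssembler {
  void EnterInternalFrame() { std::puts("enter internal frame"); }
  void LeaveInternalFrame() { std::puts("leave internal frame"); }
};

// RAII helper in the spirit of the FrameScope the newer code used: the frame
// is torn down automatically when the scope object goes out of scope.
class FrameScope {
 public:
  explicit FrameScope(MacroAssembler* masm) : masm_(masm) { masm_->EnterInternalFrame(); }
  ~FrameScope() { masm_->LeaveInternalFrame(); }
 private:
  MacroAssembler* masm_;
};

void GenerateWithScope(MacroAssembler* masm) {
  FrameScope scope(masm);                    // frame closed automatically at '}'
  std::puts("  ... emit debug-break code inside the frame ...");
}

void GenerateWithExplicitCalls(MacroAssembler* masm) {  // 3.6.4 style, as in the hunk above
  masm->EnterInternalFrame();
  std::puts("  ... emit debug-break code inside the frame ...");
  masm->LeaveInternalFrame();
}

int main() {
  MacroAssembler masm;
  GenerateWithScope(&masm);
  GenerateWithExplicitCalls(&masm);
  return 0;
}
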
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 02cc4ebd3b..e23f3e9eff 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -116,7 +116,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
new_reloc->GetDataStartAddress() + padding, 0);
intptr_t comment_string
= reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
- RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
+ RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string);
for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
byte* pos_before = reloc_info_writer.pos();
@@ -174,8 +174,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// We use RUNTIME_ENTRY for deoptimization bailouts.
RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(deopt_entry),
- NULL);
+ reinterpret_cast<intptr_t>(deopt_entry));
reloc_info_writer.Write(&rinfo);
ASSERT_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
@@ -206,11 +205,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -227,8 +221,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@@ -257,13 +250,6 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
*(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
-
- RelocInfo rinfo(call_target_address,
- RelocInfo::CODE_TARGET,
- 0,
- unoptimized_code);
- unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- unoptimized_code, &rinfo, replacement_code);
}
@@ -282,9 +268,6 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address,
check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->
- RecordCodeTargetPatch(call_target_address, check_code);
}
@@ -432,14 +415,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Setup the frame pointer and the context pointer.
- // All OSR stack frames are dynamically aligned to an 8-byte boundary.
- int frame_pointer = input_->GetRegister(ebp.code());
- if ((frame_pointer & 0x4) == 0) {
- // Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
- frame_pointer -= kPointerSize;
- has_alignment_padding_ = 1;
- }
- output_[0]->SetRegister(ebp.code(), frame_pointer);
+ output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
@@ -504,11 +480,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
- // If the optimized frame had alignment padding, adjust the frame pointer
- // to point to the new position of the old frame pointer after padding
- // is removed. Subtract 2 * kPointerSize for the context and function slots.
- top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
- height_in_bytes + has_alignment_padding_ * kPointerSize;
+ // 2 = context and function in the frame.
+ top_address =
+ input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
@@ -559,9 +533,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost ||
- input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
- == fp_value);
+ ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
@@ -666,7 +638,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters;
- __ sub(esp, Immediate(kDoubleRegsSize));
+ __ sub(Operand(esp), Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
@@ -690,7 +662,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
}
- __ sub(edx, ebp);
+ __ sub(edx, Operand(ebp));
__ neg(edx);
// Allocate a new deoptimizer object.
@@ -703,10 +675,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
- }
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
// Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer.
@@ -729,15 +698,15 @@ void Deoptimizer::EntryGenerator::Generate() {
// Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
- __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
+ __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
} else {
- __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
+ __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
}
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
- __ add(ecx, esp);
+ __ add(ecx, Operand(esp));
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
@@ -746,43 +715,18 @@ void Deoptimizer::EntryGenerator::Generate() {
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
- __ add(edx, Immediate(sizeof(uint32_t)));
- __ cmp(ecx, esp);
+ __ add(Operand(edx), Immediate(sizeof(uint32_t)));
+ __ cmp(ecx, Operand(esp));
__ j(not_equal, &pop_loop);
- // If frame was dynamically aligned, pop padding.
- Label sentinel, sentinel_done;
- __ pop(ecx);
- __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
- __ j(equal, &sentinel);
- __ push(ecx);
- __ jmp(&sentinel_done);
- __ bind(&sentinel);
- __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(1));
- __ bind(&sentinel_done);
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, ebx);
__ mov(Operand(esp, 0 * kPointerSize), eax);
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
- }
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 1);
__ pop(eax);
- if (type() == OSR) {
- // If alignment padding is added, push the sentinel.
- Label no_osr_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_osr_padding, Label::kNear);
- __ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
- __ bind(&no_osr_padding);
- }
-
-
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
@@ -795,12 +739,12 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop);
- __ sub(ecx, Immediate(sizeof(uint32_t)));
+ __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
- __ test(ecx, ecx);
+ __ test(ecx, Operand(ecx));
__ j(not_zero, &inner_push_loop);
- __ add(eax, Immediate(kPointerSize));
- __ cmp(eax, edx);
+ __ add(Operand(eax), Immediate(kPointerSize));
+ __ cmp(eax, Operand(edx));
__ j(below, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers.
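
Among the deoptimizer changes, the removed OSR code dynamically aligned the frame pointer so that the return address at FP + 4 sits on an 8-byte boundary. A standalone illustration of that check and the has_alignment_padding bookkeeping, with kPointerSize fixed at 4 as on ia32:

#include <cstdint>
#include <cstdio>

const uint32_t kPointerSize = 4;  // ia32

// Mirrors the alignment check the diff removes: OSR frames are kept on an
// 8-byte boundary, so if FP mod 8 == 0 the return address at FP + 4 would be
// misaligned; one padding slot is recorded and FP moves down by kPointerSize.
uint32_t AlignOsrFramePointer(uint32_t frame_pointer, int* has_alignment_padding) {
  *has_alignment_padding = 0;
  if ((frame_pointer & 0x4) == 0) {
    frame_pointer -= kPointerSize;
    *has_alignment_padding = 1;
  }
  return frame_pointer;
}

int main() {
  const uint32_t inputs[] = {0xFF00, 0xFF04};
  for (uint32_t fp : inputs) {
    int padding = 0;
    uint32_t aligned = AlignOsrFramePointer(fp, &padding);
    std::printf("fp=0x%X -> 0x%X, padding=%d\n",
                static_cast<unsigned>(fp), static_cast<unsigned>(aligned), padding);
  }
  return 0;
}
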
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 04edc5f427..a936277b2f 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -55,7 +55,6 @@ struct ByteMnemonic {
static const ByteMnemonic two_operands_instr[] = {
- {0x01, "add", OPER_REG_OP_ORDER},
{0x03, "add", REG_OPER_OP_ORDER},
{0x09, "or", OPER_REG_OP_ORDER},
{0x0B, "or", REG_OPER_OP_ORDER},
@@ -118,19 +117,6 @@ static const ByteMnemonic short_immediate_instr[] = {
};
-// Generally we don't want to generate these because they are subject to partial
-// register stalls. They are included for completeness and because the cmp
-// variant is used by the RecordWrite stub. Because it does not update the
-// register it is not subject to partial register stalls.
-static ByteMnemonic byte_immediate_instr[] = {
- {0x0c, "or", UNSET_OP_ORDER},
- {0x24, "and", UNSET_OP_ORDER},
- {0x34, "xor", UNSET_OP_ORDER},
- {0x3c, "cmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
static const char* const jump_conditional_mnem[] = {
/*0*/ "jo", "jno", "jc", "jnc",
/*4*/ "jz", "jnz", "jna", "ja",
@@ -163,8 +149,7 @@ enum InstructionType {
REGISTER_INSTR,
MOVE_REG_INSTR,
CALL_JUMP_INSTR,
- SHORT_IMMEDIATE_INSTR,
- BYTE_IMMEDIATE_INSTR
+ SHORT_IMMEDIATE_INSTR
};
@@ -213,7 +198,6 @@ void InstructionTable::Init() {
CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
CopyTable(call_jump_instr, CALL_JUMP_INSTR);
CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
- CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
AddJumpConditionalShort();
SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
@@ -928,12 +912,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
}
- case BYTE_IMMEDIATE_INSTR: {
- AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
- data += 2;
- break;
- }
-
case NO_INSTR:
processed = false;
break;
@@ -1368,6 +1346,11 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 2;
break;
+ case 0x2C:
+ AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
+ data += 2;
+ break;
+
case 0xA9:
AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
data += 5;
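
The disassembler hunk swaps a table-driven decode of one-byte AL-immediate opcodes for a single explicit case. A self-contained sketch of the table-driven form the diff removes, with the same entries and a simplified decode loop (the real DisassemblerIA32 carries far more state):

#include <cstddef>
#include <cstdint>
#include <cstdio>

enum OperandOrder { UNSET_OP_ORDER };

struct ByteMnemonic {
  int b;               // opcode byte; -1 terminates the table
  const char* mnem;
  OperandOrder op_order;
};

// Same entries as the byte_immediate_instr table the diff removes: one-byte
// opcodes taking AL and an 8-bit immediate.
static const ByteMnemonic byte_immediate_instr[] = {
  {0x0c, "or",  UNSET_OP_ORDER},
  {0x24, "and", UNSET_OP_ORDER},
  {0x34, "xor", UNSET_OP_ORDER},
  {0x3c, "cmp", UNSET_OP_ORDER},
  {-1,   "",    UNSET_OP_ORDER}
};

// Table-driven decode of one such instruction; returns bytes consumed, 0 if no match.
int DecodeByteImmediate(const uint8_t* data, char* out, size_t out_size) {
  for (const ByteMnemonic* entry = byte_immediate_instr; entry->b != -1; ++entry) {
    if (entry->b == data[0]) {
      std::snprintf(out, out_size, "%s al, 0x%x", entry->mnem, data[1]);
      return 2;
    }
  }
  return 0;
}

int main() {
  const uint8_t code[] = {0x3c, 0x2a};   // cmp al, 0x2a
  char buffer[32];
  if (DecodeByteImmediate(code, buffer, sizeof(buffer))) std::puts(buffer);
  return 0;
}
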
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 33d5cabad7..ca6ce6e31a 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -138,7 +138,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// function calls.
if (info->is_strict_mode() || info->is_native()) {
Label ok;
- __ test(ecx, ecx);
+ __ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
@@ -147,11 +147,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@@ -205,12 +200,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers eax and ebx.
- __ RecordWriteContextSlot(esi,
- context_offset,
- eax,
- ebx,
- kDontSaveFPRegs);
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have use a third register to avoid
+ // clobbering esi.
+ __ mov(ecx, esi);
+ __ RecordWrite(ecx, context_offset, eax, ebx);
}
}
}
@@ -266,7 +260,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), CONST, NULL, &ignored);
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -371,10 +365,10 @@ void FullCodeGenerator::EmitReturnSequence() {
void FullCodeGenerator::verify_stack_height() {
ASSERT(FLAG_verify_stack_height);
- __ sub(ebp, Immediate(kPointerSize * stack_height()));
- __ cmp(ebp, esp);
+ __ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
+ __ cmp(ebp, Operand(esp));
__ Assert(equal, "Full codegen stack height not as expected.");
- __ add(ebp, Immediate(kPointerSize * stack_height()));
+ __ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
}
@@ -603,7 +597,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub, condition->test_id());
- __ test(result_register(), result_register());
+ __ test(result_register(), Operand(result_register()));
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@@ -667,12 +661,11 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ mov(location, src);
-
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
int offset = Context::SlotOffset(var->index());
ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
- __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
+ __ RecordWrite(scratch0, offset, src, scratch1);
}
}
@@ -704,7 +697,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- VariableMode mode,
+ Variable::Mode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
@@ -722,7 +715,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(StackOperand(variable), result_register());
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ mov(StackOperand(variable),
Immediate(isolate()->factory()->the_hole_value()));
@@ -745,16 +738,11 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(ContextOperand(esi, variable->index()), result_register());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(esi,
- Context::SlotOffset(variable->index()),
- result_register(),
- ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ int offset = Context::SlotOffset(variable->index());
+ __ mov(ebx, esi);
+ __ RecordWrite(ebx, offset, result_register(), ecx);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ mov(ContextOperand(esi, variable->index()),
Immediate(isolate()->factory()->the_hole_value()));
@@ -768,8 +756,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ push(esi);
__ push(Immediate(variable->name()));
// Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == VAR || mode == CONST || mode == LET);
- PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE;
+ ASSERT(mode == Variable::VAR ||
+ mode == Variable::CONST ||
+ mode == Variable::LET);
+ PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
__ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -778,7 +768,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
increment_stack_height(3);
if (function != NULL) {
VisitForStackValue(function);
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
__ push(Immediate(isolate()->factory()->the_hole_value()));
increment_stack_height();
} else {
@@ -845,10 +835,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (inline_smi_code) {
Label slow_case;
__ mov(ecx, edx);
- __ or_(ecx, eax);
+ __ or_(ecx, Operand(eax));
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
- __ cmp(edx, eax);
+ __ cmp(edx, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -860,7 +850,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
__ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -949,7 +939,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
- __ cmp(ecx, eax);
+ __ cmp(ecx, Operand(eax));
__ j(equal, &check_prototype, Label::kNear);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ cmp(edx, isolate()->factory()->empty_fixed_array());
@@ -1031,9 +1021,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
__ j(equal, loop_statement.continue_label());
- __ mov(ebx, eax);
+ __ mov(ebx, Operand(eax));
// Update the 'each' property or variable from the possibly filtered
// entry in register ebx.
@@ -1057,7 +1047,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ add(esp, Immediate(5 * kPointerSize));
+ __ add(Operand(esp), Immediate(5 * kPointerSize));
decrement_stack_height(ForIn::kElementCount);
// Exit and decrement the loop depth.
@@ -1199,22 +1189,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == DYNAMIC_GLOBAL) {
+ if (var->mode() == Variable::DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
+ } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == LET) {
+ if (local->mode() == Variable::CONST) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
- if (local->mode() == CONST) {
- __ mov(eax, isolate()->factory()->undefined_value());
- } else { // LET
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
+ __ mov(eax, isolate()->factory()->undefined_value());
}
__ jmp(done);
}
@@ -1247,7 +1231,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (var->mode() != LET && var->mode() != CONST) {
+ if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
context()->Plug(var);
} else {
// Let and const need a read barrier.
@@ -1255,10 +1239,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(eax, var);
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET) {
+ if (var->mode() == Variable::LET) {
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else { // CONST
+ } else { // Variable::CONST
__ mov(eax, isolate()->factory()->undefined_value());
}
__ bind(&done);
@@ -1496,18 +1480,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ mov(FieldOperand(ebx, offset), result_register());
- Label no_map_change;
- __ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store.
- __ RecordWriteField(ebx, offset, result_register(), ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
- __ CheckFastSmiOnlyElements(edi, &no_map_change, Label::kNear);
- __ push(Operand(esp, 0));
- __ CallRuntime(Runtime::kNonSmiElementStored, 1);
- __ bind(&no_map_change);
+ __ RecordWrite(ebx, offset, result_register(), ecx);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1667,7 +1641,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ pop(edx);
decrement_stack_height();
__ mov(ecx, eax);
- __ or_(eax, edx);
+ __ or_(eax, Operand(edx));
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
@@ -1717,32 +1691,32 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::ADD:
- __ add(eax, ecx);
+ __ add(eax, Operand(ecx));
__ j(overflow, &stub_call);
break;
case Token::SUB:
- __ sub(eax, ecx);
+ __ sub(eax, Operand(ecx));
__ j(overflow, &stub_call);
break;
case Token::MUL: {
__ SmiUntag(eax);
- __ imul(eax, ecx);
+ __ imul(eax, Operand(ecx));
__ j(overflow, &stub_call);
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
__ j(not_zero, &done, Label::kNear);
__ mov(ebx, edx);
- __ or_(ebx, ecx);
+ __ or_(ebx, Operand(ecx));
__ j(negative, &stub_call);
break;
}
case Token::BIT_OR:
- __ or_(eax, ecx);
+ __ or_(eax, Operand(ecx));
break;
case Token::BIT_AND:
- __ and_(eax, ecx);
+ __ and_(eax, Operand(ecx));
break;
case Token::BIT_XOR:
- __ xor_(eax, ecx);
+ __ xor_(eax, Operand(ecx));
break;
default:
UNREACHABLE();
@@ -1864,7 +1838,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(eax); // Value.
@@ -1885,12 +1859,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(location, eax);
if (var->IsContextSlot()) {
__ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+ __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
}
}
- } else if (var->mode() != CONST) {
+ } else if (var->mode() != Variable::CONST) {
// Assignment to var or initializing assignment to let.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, ecx);
@@ -1904,8 +1877,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(location, eax);
if (var->IsContextSlot()) {
__ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+ __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
}
} else {
ASSERT(var->IsLookupSlot());
@@ -2097,29 +2069,8 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
-
- // Record call targets in unoptimized code, but not in the snapshot.
- bool record_call_target = !Serializer::enabled();
- if (record_call_target) {
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- }
CallFunctionStub stub(arg_count, flags);
__ CallStub(&stub);
- if (record_call_target) {
- // There is a one element cache in the instruction stream.
-#ifdef DEBUG
- int return_site_offset = masm()->pc_offset();
-#endif
- Handle<Object> uninitialized =
- CallFunctionStub::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- __ test(eax, Immediate(cell));
- // Patching code in the stub assumes the opcode is 1 byte and there is
- // word for a pointer in the operand.
- ASSERT(masm()->pc_offset() - return_site_offset >= 1 + kPointerSize);
- }
-
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2143,8 +2094,10 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
// Push the strict mode flag. In harmony mode every eval call
// is a strict mode eval call.
- StrictModeFlag strict_mode =
- FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
+ StrictModeFlag strict_mode = strict_mode_flag();
+ if (FLAG_harmony_block_scoping) {
+ strict_mode = kStrictMode;
+ }
__ push(Immediate(Smi::FromInt(strict_mode)));
__ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
@@ -2187,7 +2140,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// context lookup in the runtime system.
Label done;
Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
+ if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
Label slow;
EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
// Push the function and resolve eval.
@@ -2485,9 +2438,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
STATIC_ASSERT(kPointerSize == 4);
__ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// Calculate location of the first key name.
- __ add(ebx,
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
+ __ add(Operand(ebx),
+ Immediate(FixedArray::kHeaderSize +
+ DescriptorArray::kFirstIndex * kPointerSize));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false.
Label entry, loop;
@@ -2496,9 +2449,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ mov(edx, FieldOperand(ebx, 0));
__ cmp(edx, FACTORY->value_of_symbol());
__ j(equal, if_false);
- __ add(ebx, Immediate(kPointerSize));
+ __ add(Operand(ebx), Immediate(kPointerSize));
__ bind(&entry);
- __ cmp(ebx, ecx);
+ __ cmp(ebx, Operand(ecx));
__ j(not_equal, &loop);
// Reload map as register ebx was used as temporary above.
@@ -2638,7 +2591,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
__ pop(ebx);
decrement_stack_height();
- __ cmp(eax, ebx);
+ __ cmp(eax, Operand(ebx));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2694,24 +2647,20 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
// Map is now in eax.
__ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
+
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &function);
+
+ // Check if the constructor in the map is a function.
__ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &non_function_constructor);
@@ -2792,8 +2741,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, ebx);
- __ movd(xmm0, eax);
+ __ movd(xmm1, Operand(ebx));
+ __ movd(xmm0, Operand(eax));
__ cvtss2sd(xmm1, xmm1);
__ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
@@ -2894,11 +2843,10 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
// Store the value.
__ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
-
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
__ mov(edx, eax);
- __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
+ __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
__ bind(&done);
context()->Plug(eax);
@@ -3171,14 +3119,14 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ mov(index_1, Operand(esp, 1 * kPointerSize));
__ mov(index_2, Operand(esp, 0));
__ mov(temp, index_1);
- __ or_(temp, index_2);
+ __ or_(temp, Operand(index_2));
__ JumpIfNotSmi(temp, &slow_case);
// Check that both indices are valid.
__ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
- __ cmp(temp, index_1);
+ __ cmp(temp, Operand(index_1));
__ j(below_equal, &slow_case);
- __ cmp(temp, index_2);
+ __ cmp(temp, Operand(index_2));
__ j(below_equal, &slow_case);
// Bring addresses into index1 and index2.
@@ -3191,35 +3139,16 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ mov(Operand(index_2, 0), object);
__ mov(Operand(index_1, 0), temp);
- Label no_remembered_set;
- __ CheckPageFlag(elements,
- temp,
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- not_zero,
- &no_remembered_set,
- Label::kNear);
- // Possible optimization: do a check that both values are Smis
- // (or them and test against Smi mask.)
-
- // We are swapping two objects in an array and the incremental marker never
- // pauses in the middle of scanning a single object. Therefore the
- // incremental marker is not disturbed, so we don't need to call the
- // RecordWrite stub that notifies the incremental marker.
- __ RememberedSetHelper(elements,
- index_1,
- temp,
- kDontSaveFPRegs,
- MacroAssembler::kFallThroughAtEnd);
- __ RememberedSetHelper(elements,
- index_2,
- temp,
- kDontSaveFPRegs,
- MacroAssembler::kFallThroughAtEnd);
-
- __ bind(&no_remembered_set);
+ Label new_space;
+ __ InNewSpace(elements, temp, equal, &new_space);
+ __ mov(object, elements);
+ __ RecordWriteHelper(object, index_1, temp);
+ __ RecordWriteHelper(elements, index_2, temp);
+
+ __ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
- __ add(esp, Immediate(3 * kPointerSize));
+ __ add(Operand(esp), Immediate(3 * kPointerSize));
__ mov(eax, isolate()->factory()->undefined_value());
__ jmp(&done);
@@ -3292,11 +3221,11 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ pop(left);
Label done, fail, ok;
- __ cmp(left, right);
+ __ cmp(left, Operand(right));
__ j(equal, &ok);
// Fail if either is a non-HeapObject.
__ mov(tmp, left);
- __ and_(tmp, right);
+ __ and_(Operand(tmp), right);
__ JumpIfSmi(tmp, &fail);
__ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
__ CmpInstanceType(tmp, JS_REGEXP_TYPE);
@@ -3387,7 +3316,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
Operand separator_operand = Operand(esp, 2 * kPointerSize);
Operand result_operand = Operand(esp, 1 * kPointerSize);
Operand array_length_operand = Operand(esp, 0);
- __ sub(esp, Immediate(2 * kPointerSize));
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ JumpIfSmi(array, &bailout);
@@ -3423,7 +3352,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
if (FLAG_debug_code) {
- __ cmp(index, array_length);
+ __ cmp(index, Operand(array_length));
__ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
}
__ bind(&loop);
@@ -3441,8 +3370,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ add(string_length,
FieldOperand(string, SeqAsciiString::kLengthOffset));
__ j(overflow, &bailout);
- __ add(index, Immediate(1));
- __ cmp(index, array_length);
+ __ add(Operand(index), Immediate(1));
+ __ cmp(index, Operand(array_length));
__ j(less, &loop);
// If array_length is 1, return elements[0], a string.
@@ -3476,10 +3405,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// to string_length.
__ mov(scratch, separator_operand);
__ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
- __ sub(string_length, scratch); // May be negative, temporarily.
+ __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
__ imul(scratch, array_length_operand);
__ j(overflow, &bailout);
- __ add(string_length, scratch);
+ __ add(string_length, Operand(scratch));
__ j(overflow, &bailout);
__ shr(string_length, 1);
@@ -3520,7 +3449,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
+ __ add(Operand(index), Immediate(1));
__ bind(&loop_1_condition);
__ cmp(index, array_length_operand);
__ j(less, &loop_1); // End while (index < length).
@@ -3561,7 +3490,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
+ __ add(Operand(index), Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_2); // End while (index < length).
@@ -3602,7 +3531,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
+ __ add(Operand(index), Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_3); // End while (index < length).
@@ -3614,7 +3543,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ bind(&done);
__ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
- __ add(esp, Immediate(3 * kPointerSize));
+ __ add(Operand(esp), Immediate(3 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
decrement_stack_height();
@@ -3894,9 +3823,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
- __ add(eax, Immediate(Smi::FromInt(1)));
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
} else {
- __ sub(eax, Immediate(Smi::FromInt(1)));
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
}
__ j(overflow, &stub_call, Label::kNear);
// We could eliminate this smi check if we split the code at
@@ -3906,9 +3835,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
// Call stub. Undo operation first.
if (expr->op() == Token::INC) {
- __ sub(eax, Immediate(Smi::FromInt(1)));
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
} else {
- __ add(eax, Immediate(Smi::FromInt(1)));
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
}
}
@@ -4027,14 +3956,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
VisitForTypeofValue(expr);
}
@@ -4073,11 +3998,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(eax, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
- __ j(equal, if_true);
- __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
- Split(equal, if_true, if_false, fall_through);
+ __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
+ Split(above_equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(eax, if_false);
if (!FLAG_harmony_typeof) {
@@ -4095,7 +4017,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
- context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ Split(equal, if_true, if_false, fall_through);
}
@@ -4103,12 +4036,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
+
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4116,9 +4046,16 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+ context()->Plug(if_true, if_false);
+ return;
+ }
+
Token::Value op = expr->op();
VisitForStackValue(expr->left());
- switch (op) {
+ switch (expr->op()) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
@@ -4134,7 +4071,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ CallStub(&stub);
decrement_stack_height(2);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
@@ -4180,10 +4117,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
- __ cmp(edx, eax);
+ __ cmp(edx, Operand(eax));
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
@@ -4195,7 +4132,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ test(eax, eax);
+ __ test(eax, Operand(eax));
Split(cc, if_true, if_false, fall_through);
}
}
@@ -4206,9 +4143,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4216,20 +4151,15 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(sub_expr);
+ VisitForAccumulatorValue(expr->expression());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Handle<Object> nil_value = nil == kNullValue ?
- isolate()->factory()->null_value() :
- isolate()->factory()->undefined_value();
- __ cmp(eax, nil_value);
- if (expr->op() == Token::EQ_STRICT) {
+
+ __ cmp(eax, isolate()->factory()->null_value());
+ if (expr->is_strict()) {
Split(equal, if_true, if_false, fall_through);
} else {
- Handle<Object> other_nil_value = nil == kNullValue ?
- isolate()->factory()->undefined_value() :
- isolate()->factory()->null_value();
__ j(equal, if_true);
- __ cmp(eax, other_nil_value);
+ __ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true);
__ JumpIfSmi(eax, if_false);
// It can be an undetectable object.
@@ -4296,7 +4226,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
__ pop(edx);
- __ sub(edx, Immediate(masm_->CodeObject()));
+ __ sub(Operand(edx), Immediate(masm_->CodeObject()));
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ SmiTag(edx);
@@ -4312,8 +4242,8 @@ void FullCodeGenerator::ExitFinallyBlock() {
// Uncook return address.
__ pop(edx);
__ SmiUntag(edx);
- __ add(edx, Immediate(masm_->CodeObject()));
- __ jmp(edx);
+ __ add(Operand(edx), Immediate(masm_->CodeObject()));
+ __ jmp(Operand(edx));
}
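
Two substitutions recur throughout full-codegen-ia32.cc: the RecordWriteContextSlot/RecordWriteField calls (which carry a kDontSaveFPRegs mode) become the older four-argument RecordWrite(object, offset, value, scratch), and the namespace-level VariableMode constants become members of Variable (Variable::CONST, Variable::LET, ...). A small sketch of the enum-scoping half, with a placeholder Variable class standing in for V8's:

#include <cstdio>

// Newer style (pre-downgrade): a namespace-level VariableMode enum, so call
// sites write bare CONST / LET / VAR.
enum VariableMode { VAR, CONST, LET, DYNAMIC_GLOBAL, DYNAMIC_LOCAL };

// Older style (3.6.4): the enum is nested inside Variable, so call sites must
// qualify the constants, e.g. Variable::CONST -- the substitution made
// throughout the hunks above. This Variable is a bare placeholder.
class Variable {
 public:
  enum Mode { VAR, CONST, LET, DYNAMIC_GLOBAL, DYNAMIC_LOCAL };
  explicit Variable(Mode mode) : mode_(mode) {}
  Mode mode() const { return mode_; }
 private:
  Mode mode_;
};

// Matches the read-barrier condition in EmitVariableLoad: let and const
// bindings are checked against the-hole before use.
bool NeedsHoleCheck(const Variable& var) {
  return var.mode() == Variable::CONST || var.mode() == Variable::LET;
}

int main() {
  Variable v(Variable::LET);
  std::printf("needs hole check: %d\n", NeedsHoleCheck(v) ? 1 : 0);
  return 0;
}
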
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 8a98b179d3..9b5cc56401 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update write barrier. Make sure not to clobber the value.
__ mov(r1, value);
- __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
+ __ RecordWrite(elements, r0, r1);
}
@@ -326,7 +326,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
+ __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
@@ -394,8 +394,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
__ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(scratch2, Immediate(Smi::FromInt(2)));
- __ cmp(key, scratch2);
+ __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
+ __ cmp(key, Operand(scratch2));
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
@@ -432,7 +432,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, scratch);
+ __ cmp(key, Operand(scratch));
__ j(greater_equal, slow_case);
return FieldOperand(backing_store,
key,
@@ -534,7 +534,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shr(ecx, KeyedLookupCache::kMapHashShift);
__ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
__ shr(edi, String::kHashShift);
- __ xor_(ecx, edi);
+ __ xor_(ecx, Operand(edi));
__ and_(ecx, KeyedLookupCache::kCapacityMask);
// Load the key (consisting of map and symbol) from the cache and
@@ -545,7 +545,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shl(edi, kPointerSizeLog2 + 1);
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
- __ add(edi, Immediate(kPointerSize));
+ __ add(Operand(edi), Immediate(kPointerSize));
__ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
@@ -559,12 +559,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, ecx);
+ __ sub(edi, Operand(ecx));
__ j(above_equal, &property_array_property);
// Load in-object property.
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(ecx, edi);
+ __ add(ecx, Operand(edi));
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -651,8 +651,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
__ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
- __ and_(ecx, Immediate(kSlowCaseBitFieldMask));
- __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
+ __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
+ __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
__ j(not_zero, &slow);
// Everything is fine, call runtime.
@@ -710,7 +710,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(mapped_location, eax);
__ lea(ecx, mapped_location);
__ mov(edx, eax);
- __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
+ __ RecordWrite(ebx, ecx, edx);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in ebx.
@@ -719,7 +719,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(unmapped_location, eax);
__ lea(edi, unmapped_location);
__ mov(edx, eax);
- __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
+ __ RecordWrite(ebx, edi, edx);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@@ -734,9 +734,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label slow, fast_object_with_map_check, fast_object_without_map_check;
- Label fast_double_with_map_check, fast_double_without_map_check;
- Label check_if_double_array, array, extra;
+ Label slow, fast, array, extra;
// Check that the object isn't a smi.
__ JumpIfSmi(edx, &slow);
@@ -752,18 +750,22 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
// Check that the object is some kind of JSObject.
- __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
__ j(below, &slow);
+ __ CmpInstanceType(edi, JS_PROXY_TYPE);
+ __ j(equal, &slow);
+ __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, &slow);
// Object case: Check key against length in the elements array.
// eax: value
// edx: JSObject
// ecx: key (a smi)
- // edi: receiver map
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(below, &fast_object_with_map_check);
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ // Check that the object is in fast mode and writable.
+ __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
+ __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+ __ j(below, &fast);
// Slow case: call runtime.
__ bind(&slow);
@@ -776,28 +778,16 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// eax: value
// edx: receiver, a JSArray
// ecx: key, a smi.
- // ebx: receiver->elements, a FixedArray
- // edi: receiver map
+ // edi: receiver->elements, a FixedArray
// flags: compare (ecx, edx.length())
// do not leave holes in the array:
__ j(not_equal, &slow);
- __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, &check_if_double_array);
- // Add 1 to receiver->length, and go to common element store code for Objects.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&fast_object_without_map_check);
-
- __ bind(&check_if_double_array);
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, &slow);
- // Add 1 to receiver->length, and go to common element store code for doubles.
+ // Add 1 to receiver->length, and go to fast array write.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
- __ jmp(&fast_double_without_map_check);
+ __ jmp(&fast);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@@ -806,54 +796,24 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// eax: value
// edx: receiver, a JSArray
// ecx: key, a smi.
- // edi: receiver map
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
- // Check the key against the length in the array and fall through to the
- // common store code.
+ // Check the key against the length in the array, compute the
+ // address to store into and fall through to fast case.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra);
- // Fast case: Do the store, could either Object or double.
- __ bind(&fast_object_with_map_check);
+ // Fast case: Do the store.
+ __ bind(&fast);
// eax: value
// ecx: key (a smi)
// edx: receiver
- // ebx: FixedArray receiver->elements
- // edi: receiver map
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, &fast_double_with_map_check);
- __ bind(&fast_object_without_map_check);
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(eax, &non_smi_value);
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
- __ ret(0);
-
- __ bind(&non_smi_value);
- // Escape to slow case when writing non-smi into smi-only array.
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(edi, &slow, Label::kNear);
-
- // Fast elements array, store the value to the elements backing store.
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+ // edi: FixedArray receiver->elements
+ __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
// Update write barrier for the elements array address.
- __ mov(edx, eax); // Preserve the value which is returned.
- __ RecordWriteArray(
- ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(&fast_double_with_map_check);
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, &slow);
- __ bind(&fast_double_without_map_check);
- // If the value is a number, store it as a double in the FastDoubleElements
- // array.
- __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false);
+ __ mov(edx, Operand(eax));
+ __ RecordWrite(edi, 0, edx, ecx);
__ ret(0);
}
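
For orientation, the reverted KeyedStoreIC::GenerateGeneric above boils down to a short decision sequence: bail out for smis and non-fast elements, bounds-check plain objects against the FixedArray length, let a JSArray grow by exactly one element, and otherwise fall through to the runtime. The following is a minimal stand-alone C++ sketch of that control flow only; ModelReceiver and StoreResult are invented for the illustration and are not V8 types.

    #include <cstddef>
    #include <vector>

    // Toy model of the fast keyed-store decision sequence; "kSlow" stands for
    // the runtime call. None of these types correspond to real V8 heap objects.
    enum class StoreResult { kFast, kGrewArray, kSlow };

    struct ModelReceiver {
      bool is_smi = false;            // smis never take the fast path
      bool is_js_array = false;       // JSArray keeps a separate length
      bool has_fast_elements = true;  // backing store is a plain FixedArray
      std::size_t array_length = 0;   // JSArray::length (only if is_js_array)
      std::vector<int> elements;      // FixedArray backing store
    };

    StoreResult KeyedStoreGeneric(ModelReceiver& r, std::size_t key, int value) {
      if (r.is_smi || !r.has_fast_elements) return StoreResult::kSlow;
      bool grew = false;
      if (r.is_js_array) {
        // Compare the key against JSArray::length ("extra" handles key == length).
        if (key >= r.array_length) {
          if (key != r.array_length) return StoreResult::kSlow;     // would leave a hole
          if (key >= r.elements.size()) return StoreResult::kSlow;  // no spare capacity
          r.array_length++;                                         // add 1 to length
          grew = true;
        }
      } else {
        // Plain JSObject: bounds-check against the FixedArray length directly.
        if (key >= r.elements.size()) return StoreResult::kSlow;
      }
      if (key >= r.elements.size()) return StoreResult::kSlow;  // model-only safety guard
      r.elements[key] = value;  // common fast store; the write barrier runs after this
      return grew ? StoreResult::kGrewArray : StoreResult::kFast;
    }
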
@@ -991,22 +951,22 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Enter an internal frame.
+ __ EnterInternalFrame();
- // Push the receiver and the name of the function.
- __ push(edx);
- __ push(ecx);
+ // Push the receiver and the name of the function.
+ __ push(edx);
+ __ push(ecx);
- // Call the entry.
- CEntryStub stub(1);
- __ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
- __ CallStub(&stub);
+ // Call the entry.
+ CEntryStub stub(1);
+ __ mov(eax, Immediate(2));
+ __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
+ __ CallStub(&stub);
- // Move result to edi and exit the internal frame.
- __ mov(edi, eax);
- }
+ // Move result to edi and exit the internal frame.
+ __ mov(edi, eax);
+ __ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@@ -1151,17 +1111,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(ecx); // save the key
- __ push(edx); // pass the receiver
- __ push(ecx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(ecx); // restore the key
- // Leave the internal frame.
- }
-
+ __ EnterInternalFrame();
+ __ push(ecx); // save the key
+ __ push(edx); // pass the receiver
+ __ push(ecx); // pass the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(ecx); // restore the key
+ __ LeaveInternalFrame();
__ mov(edi, eax);
__ jmp(&do_call);
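
Both call-IC hunks above revert the newer RAII FrameScope to explicit EnterInternalFrame/LeaveInternalFrame pairs. The trade-off is the usual one between scope-based and manual cleanup; a hedged sketch with an invented assembler interface, not the real MacroAssembler API:

    // Hypothetical assembler stand-in, for illustration only.
    struct Masm {
      void EnterInternalFrame() {}
      void LeaveInternalFrame() {}
    };

    // Newer style (removed by this patch): the destructor leaves the frame.
    class FrameScopeModel {
     public:
      explicit FrameScopeModel(Masm* masm) : masm_(masm) { masm_->EnterInternalFrame(); }
      ~FrameScopeModel() { masm_->LeaveInternalFrame(); }
     private:
      Masm* masm_;
    };

    void WithScope(Masm* masm) {
      FrameScopeModel scope(masm);
      // ... push arguments, call the runtime ...
    }  // frame left automatically here

    void WithExplicitCalls(Masm* masm) {
      masm->EnterInternalFrame();
      // ... push arguments, call the runtime ...
      masm->LeaveInternalFrame();  // must be kept in sync by hand
    }
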
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 9e1fd34af3..4e3ea98161 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -70,17 +70,6 @@ bool LCodeGen::GenerateCode() {
ASSERT(is_unused());
status_ = GENERATING;
CpuFeatures::Scope scope(SSE2);
-
- CodeStub::GenerateFPStubs();
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
- info()->osr_ast_id() != AstNode::kNoNumber;
-
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -155,29 +144,6 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
- if (dynamic_frame_alignment_) {
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0),
- Immediate(isolate()->factory()->frame_alignment_marker()));
-
- __ bind(&do_not_pad);
- }
-
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@@ -238,12 +204,11 @@ bool LCodeGen::GeneratePrologue() {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers eax and ebx.
- __ RecordWriteContextSlot(esi,
- context_offset,
- eax,
- ebx,
- kDontSaveFPRegs);
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use a third register to avoid
+ // clobbering esi.
+ __ mov(ecx, esi);
+ __ RecordWrite(ecx, context_offset, eax, ebx);
}
}
Comment(";;; End allocate local context");
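
The comment in the reverted prologue states the key constraint of the old-style barrier: RecordWrite may clobber every register handed to it (under --debug-code it zaps them with kZapValue), so the context register esi is copied into ecx first. A small model of that discipline, with a made-up barrier function that simulates the clobbering:

    #include <cstdint>

    // Hypothetical stand-in for the barrier: it is allowed to trash all three
    // arguments, which is why callers pass copies of anything they still need.
    void RecordWriteModel(std::uintptr_t& object, std::uintptr_t& slot,
                          std::uintptr_t& value) {
      object = slot = value = 0xdeadbeef;  // simulate register clobbering
    }

    std::uintptr_t StoreContextSlot(std::uintptr_t context, std::uintptr_t slot,
                                    std::uintptr_t value) {
      std::uintptr_t object_copy = context;     // mov(ecx, esi): keep esi intact
      RecordWriteModel(object_copy, slot, value);
      return context;                           // the original context is still usable
    }
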
@@ -295,9 +260,6 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
- Comment(";;; Deferred code @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
@@ -519,18 +481,14 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context) {
+ ASSERT(context->IsRegister() || context->IsStackSlot());
if (context->IsRegister()) {
if (!ToRegister(context).is(esi)) {
__ mov(esi, ToRegister(context));
}
- } else if (context->IsStackSlot()) {
- __ mov(esi, ToOperand(context));
- } else if (context->IsConstantOperand()) {
- Handle<Object> literal =
- chunk_->LookupLiteral(LConstantOperand::cast(context));
- LoadHeapObject(esi, Handle<Context>::cast(literal));
} else {
- UNREACHABLE();
+ // Context is stack slot.
+ __ mov(esi, ToOperand(context));
}
__ CallRuntimeSaveDoubles(id);
@@ -711,7 +669,7 @@ void LCodeGen::RecordSafepoint(
int arguments,
int deoptimization_index) {
ASSERT(kind == expected_safepoint_kind_);
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
@@ -1242,13 +1200,8 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Register reg = ToRegister(instr->result());
- Handle<Object> handle = instr->value();
- if (handle->IsHeapObject()) {
- LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
- } else {
- __ Set(reg, Immediate(handle));
- }
+ ASSERT(instr->result()->IsRegister());
+ __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}
@@ -1624,33 +1577,23 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- // If the expression is known to be untagged or a smi, then it's definitely
-  // not null, and it can't be an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
+ // TODO(fsc): If the expression is known to be a smi, then it's
+ // definitely not null. Jump to the false block.
int true_block = chunk_->LookupDestination(instr->true_block_id());
- Handle<Object> nil_value = instr->nil() == kNullValue ?
- factory()->null_value() :
- factory()->undefined_value();
- __ cmp(reg, nil_value);
- if (instr->kind() == kStrictEquality) {
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ cmp(reg, factory()->null_value());
+ if (instr->is_strict()) {
EmitBranch(true_block, false_block, equal);
} else {
- Handle<Object> other_nil_value = instr->nil() == kNullValue ?
- factory()->undefined_value() :
- factory()->null_value();
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
- __ cmp(reg, other_nil_value);
+ __ cmp(reg, factory()->undefined_value());
__ j(equal, true_label);
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
@@ -1802,36 +1745,28 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+ __ j(below, is_false);
+ // Map is now in temp.
+ // Functions have class 'Function'.
+ __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
- __ j(equal, is_true);
+ __ j(above_equal, is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ mov(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpb(Operand(temp2),
- static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(above, is_false);
+ __ j(above_equal, is_false);
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
+
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -1916,8 +1851,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
virtual void Generate() {
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+
Label* map_check() { return &map_check_; }
+
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@@ -2055,17 +1991,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ mov(esp, ebp);
__ pop(ebp);
- if (dynamic_frame_alignment_) {
- Label aligned;
- // Frame alignment marker (padding) is below arguments,
- // and receiver, so its return-address-relative offset is
- // (num_arguments + 2) words.
- __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
- Immediate(factory()->frame_alignment_marker()));
- __ j(not_equal, &aligned);
- __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
- __ bind(&aligned);
- }
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
@@ -2073,7 +1998,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand::Cell(instr->hydrogen()->cell()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (instr->hydrogen()->check_hole_value()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
@@ -2094,34 +2019,20 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register object = ToRegister(instr->TempAt(0));
- Register address = ToRegister(instr->TempAt(1));
Register value = ToRegister(instr->InputAt(0));
- ASSERT(!value.is(object));
- Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
-
- int offset = JSGlobalPropertyCell::kValueOffset;
- __ mov(object, Immediate(cell_handle));
+ Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(FieldOperand(object, offset), factory()->the_hole_value());
+ if (instr->hydrogen()->check_hole_value()) {
+ __ cmp(cell_operand, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
// Store the value.
- __ mov(FieldOperand(object, offset), value);
-
- // Cells are always in the remembered set.
- __ RecordWriteField(object,
- offset,
- value,
- address,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET);
+ __ mov(cell_operand, value);
}
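
After this change the global cell is addressed directly through Operand::Cell, so DoStoreGlobalCell is just an optional hole check followed by a plain store, with no write barrier (the removed comment notes that cells are always in the remembered set). A minimal sketch, treating the cell as a tagged slot and the hole as a sentinel value:

    #include <cstdint>
    #include <stdexcept>

    // Stand-in for the_hole_value; the real hole is a heap object, not a sentinel.
    constexpr std::uintptr_t kTheHoleSentinel = ~std::uintptr_t{0};

    // Models DoStoreGlobalCell after the downgrade: deoptimize (here: throw) if
    // the cell was deleted from the property dictionary, otherwise store directly.
    void StoreGlobalCell(std::uintptr_t* cell, std::uintptr_t value,
                         bool check_hole_value) {
      if (check_hole_value && *cell == kTheHoleSentinel) {
        throw std::runtime_error("deopt: global property was deleted");
      }
      *cell = value;  // no write barrier: cells already sit in the remembered set
    }
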
@@ -2152,7 +2063,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0));
int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs);
+ __ RecordWrite(context, offset, value, temp);
}
}
@@ -2369,14 +2280,16 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(), instr->key(),
- FAST_DOUBLE_ELEMENTS,
- offset);
- __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(),
+ FAST_DOUBLE_ELEMENTS,
+ offset);
+ __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
+ }
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
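
Both sides of this hunk detect a hole in a FixedDoubleArray the same way: the hole is encoded as a NaN with a distinguished upper word, so only the 32 bits at offset sizeof(kHoleNanLower32) are compared against kHoleNanUpper32. A minimal host-side sketch of that test, with the constant's value treated as an assumption:

    #include <cstdint>
    #include <cstring>

    // Assumed encoding: the hole is a NaN whose upper 32 bits equal this value.
    constexpr std::uint32_t kAssumedHoleNanUpper32 = 0x7FF7FFFF;

    bool IsTheHole(double element) {
      std::uint64_t bits;
      std::memcpy(&bits, &element, sizeof(bits));  // avoid strict-aliasing issues
      return static_cast<std::uint32_t>(bits >> 32) == kAssumedHoleNanUpper32;
    }
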
@@ -2446,7 +2359,6 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@@ -2768,7 +2680,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@@ -3094,7 +3005,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+ CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
}
@@ -3151,7 +3062,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object, offset, value, temp, kSaveFPRegs);
+ __ RecordWrite(object, offset, value, temp);
}
} else {
Register temp = ToRegister(instr->TempAt(0));
@@ -3160,7 +3071,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
- __ RecordWriteField(temp, offset, value, object, kSaveFPRegs);
+ __ RecordWrite(temp, offset, value, object);
}
}
}
@@ -3219,7 +3130,6 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@@ -3236,13 +3146,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- // conversion, so it deopts in that case.
- if (instr->hydrogen()->ValueNeedsSmiCheck()) {
- __ test(value, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
- }
-
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3265,7 +3168,7 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
key,
times_pointer_size,
FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value, kSaveFPRegs);
+ __ RecordWrite(elements, key, value);
}
}
@@ -3309,7 +3212,6 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -3432,7 +3334,6 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -3512,7 +3413,6 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -3580,7 +3480,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -3682,6 +3581,16 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+ LTaggedToI* instr_;
+};
+
+
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number;
Register input_reg = ToRegister(instr->InputAt(0));
@@ -3763,16 +3672,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -3983,16 +3882,9 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
- Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ cmp(reg, Operand::Cell(cell));
- } else {
- Operand operand = ToOperand(instr->value());
- __ cmp(operand, instr->hydrogen()->target());
- }
+ ASSERT(instr->InputAt(0)->IsRegister());
+ Operand operand = ToOperand(instr->InputAt(0));
+ __ cmp(operand, instr->hydrogen()->target());
DeoptimizeIf(not_equal, instr->environment());
}
@@ -4296,12 +4188,10 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_symbol())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
__ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
- __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
- final_branch_condition = equal;
+ __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
+ final_branch_condition = above_equal;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@@ -4413,7 +4303,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 6037c0868a..6156327420 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -58,7 +58,6 @@ class LCodeGen BASE_EMBEDDED {
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- dynamic_frame_alignment_(false),
deferred_(8),
osr_pc_offset_(-1),
deoptimization_reloc_size(),
@@ -134,10 +133,6 @@ class LCodeGen BASE_EMBEDDED {
int strict_mode_flag() const {
return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
- bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
- void set_dynamic_frame_alignment(bool value) {
- dynamic_frame_alignment_ = value;
- }
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@@ -302,7 +297,6 @@ class LCodeGen BASE_EMBEDDED {
int inlined_function_count_;
Scope* const scope_;
Status status_;
- bool dynamic_frame_alignment_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
@@ -352,20 +346,16 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
+ : codegen_(codegen), external_exit_(NULL) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -376,7 +366,6 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
- int instruction_index_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 856106c799..3dc220d3d9 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -214,11 +214,10 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
+ stream->Add(is_strict() ? " === null" : " == null");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@@ -352,11 +351,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot for a double-width slot.
- if (is_double) {
- spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even.
- spill_slot_count_++;
- num_double_slots_++;
- }
+ if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
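
The removed lines in GetNextSpillIndex aligned double spill slots to an even index and counted them (the deleted dynamic-frame-alignment code relied on that count); the reverted version simply consumes one extra slot. Both policies, sketched side by side on plain ints:

    // Reverted policy: a double simply consumes two consecutive slots.
    int NextSpillIndexReverted(int& spill_slot_count, bool is_double) {
      if (is_double) spill_slot_count++;
      return spill_slot_count++;
    }

    // Removed (newer) policy: the returned index for a double is always even.
    int NextSpillIndexAligned(int& spill_slot_count, int& num_double_slots,
                              bool is_double) {
      if (is_double) {
        spill_slot_count |= 1;  // make it odd, so incrementing makes it even
        spill_slot_count++;
        num_double_slots++;
      }
      return spill_slot_count++;
    }
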
@@ -712,9 +707,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ instr->set_environment(CreateEnvironment(hydrogen_env));
return instr;
}
@@ -1001,13 +994,10 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1017,6 +1007,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
argument_count_,
value_count,
outer);
+ int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1025,7 +1016,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument((*argument_index_accumulator)++);
+ op = new LArgument(argument_index++);
} else {
op = UseAny(value);
}
@@ -1480,10 +1471,10 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
// We only need a temp register for non-strict compare.
- LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
- return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
+ LOperand* temp = instr->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
}
@@ -1692,13 +1683,7 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- // If the target is in new space, we'll emit a global cell compare and so
- // want the value in a register. If the target gets promoted before we
- // emit code, we will still get the register but will do an immediate
- // compare instead of the cell compare. This is safe.
- LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
- ? UseRegisterAtStart(instr->value())
- : UseAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckFunction(value));
}
@@ -1785,7 +1770,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->RequiresHoleCheck()
+ return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@@ -1801,10 +1786,8 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LStoreGlobalCell* result =
- new LStoreGlobalCell(UseTempRegister(instr->value()),
- TempRegister(),
- TempRegister());
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
+ return instr->check_hole_value() ? AssignEnvironment(result) : result;
}
@@ -1825,13 +1808,15 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context;
LOperand* value;
LOperand* temp;
- LOperand* context = UseRegister(instr->context());
if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
temp = TempRegister();
} else {
+ context = UseRegister(instr->context());
value = UseRegister(instr->value());
temp = NULL;
}
@@ -1959,7 +1944,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
- LOperand* obj = UseRegister(instr->object());
+ LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
@@ -2036,14 +2021,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* obj;
- if (needs_write_barrier) {
- obj = instr->is_in_object()
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = UseRegisterAtStart(instr->object());
- }
+ LOperand* obj = needs_write_barrier
+ ? UseTempRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 3a06ac358b..038049ca06 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -101,7 +101,7 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
+ V(IsNullAndBranch) \
V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -615,18 +615,17 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
+class LIsNullAndBranch: public LControlInstruction<1, 1> {
public:
- LIsNilAndBranch(LOperand* value, LOperand* temp) {
+ LIsNullAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
+ bool is_strict() const { return hydrogen()->is_strict(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -1231,12 +1230,10 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
@@ -1801,8 +1798,6 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
- LOperand* value() { return inputs_[0]; }
-
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
@@ -2075,7 +2070,6 @@ class LChunk: public ZoneObject {
graph_(graph),
instructions_(32),
pointer_maps_(8),
- num_double_slots_(0),
inlined_closures_(1) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
@@ -2089,8 +2083,6 @@ class LChunk: public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
- int num_double_slots() const { return num_double_slots_; }
-
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@@ -2132,7 +2124,6 @@ class LChunk: public ZoneObject {
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
- int num_double_slots_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};
@@ -2268,8 +2259,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
void VisitInstruction(HInstruction* current);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 3aaa22acc9..837112a55c 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -44,8 +44,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- has_frame_(false) {
+ allow_stub_calls_(true) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@@ -53,75 +52,33 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
-void MacroAssembler::InNewSpace(
- Register object,
- Register scratch,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- ASSERT(cc == equal || cc == not_equal);
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- mov(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
+void MacroAssembler::RecordWriteHelper(Register object,
+ Register addr,
+ Register scratch) {
+ if (emit_debug_code()) {
+ // Check that the object is not in new space.
+ Label not_in_new_space;
+ InNewSpace(object, scratch, not_equal, &not_in_new_space);
+ Abort("new-space object passed to RecordWriteHelper");
+ bind(&not_in_new_space);
}
- // Check that we can use a test_b.
- ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
- ASSERT(MemoryChunk::IN_TO_SPACE < 8);
- int mask = (1 << MemoryChunk::IN_FROM_SPACE)
- | (1 << MemoryChunk::IN_TO_SPACE);
- // If non-zero, the page belongs to new-space.
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
- static_cast<uint8_t>(mask));
- j(cc, condition_met, condition_met_distance);
-}
+ // Compute the page start address from the heap object pointer, and reuse
+ // the 'object' register for it.
+ and_(object, ~Page::kPageAlignmentMask);
-void MacroAssembler::RememberedSetHelper(
- Register object, // Only used for debug checks.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- MacroAssembler::RememberedSetFinalAction and_then) {
- Label done;
- if (FLAG_debug_code) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(scratch, Operand::StaticVariable(store_buffer));
- // Store pointer to buffer.
- mov(Operand(scratch, 0), addr);
- // Increment buffer top.
- add(scratch, Immediate(kPointerSize));
- // Write back new top of buffer.
- mov(Operand::StaticVariable(store_buffer), scratch);
- // Call stub on end of buffer.
- // Check for end of buffer.
- test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
- if (and_then == kReturnAtEnd) {
- Label buffer_overflowed;
- j(not_equal, &buffer_overflowed, Label::kNear);
- ret(0);
- bind(&buffer_overflowed);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- j(equal, &done, Label::kNear);
- }
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
- CallStub(&store_buffer_overflow);
- if (and_then == kReturnAtEnd) {
- ret(0);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- bind(&done);
- }
+  // Compute the number of the region covering addr. See the
+  // Page::GetRegionNumberForAddress method for more details.
+ shr(addr, Page::kRegionSizeLog2);
+ and_(addr, Page::kPageAlignmentMask >> Page::kRegionSizeLog2);
+
+ // Set dirty mark for region.
+ // Bit tests with a memory operand should be avoided on Intel processors,
+  // as they usually have long latency and multiple uops. We load the bit base
+  // operand into a register first and store it back after the bit is set.
+ mov(scratch, Operand(object, Page::kDirtyFlagOffset));
+ bts(Operand(scratch), addr);
+ mov(Operand(object, Page::kDirtyFlagOffset), scratch);
}
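
The reinstated RecordWriteHelper replaces the store-buffer/remembered-set machinery with per-page dirty regions: mask the object pointer down to its page start, derive the slot's region number, and set that region's bit in the page header. A hedged sketch with assumed page-layout constants (the real values live in Page):

    #include <cstdint>

    // Assumed layout: 8 KB pages split into 32 regions of 256 bytes each.
    constexpr std::uintptr_t kPageAlignmentMask = (std::uintptr_t{1} << 13) - 1;
    constexpr unsigned kRegionSizeLog2 = 8;

    struct PageHeaderModel {
      std::uint32_t dirty_regions = 0;  // stand-in for the word at kDirtyFlagOffset
    };

    void RecordWriteHelperModel(std::uintptr_t object, std::uintptr_t slot_address,
                                PageHeaderModel* page) {
      // and_(object, ~Page::kPageAlignmentMask): page start of the written object.
      std::uintptr_t page_start = object & ~kPageAlignmentMask;
      (void)page_start;  // the generated code reuses it to address the page header
      // shr + and_: region number of the slot within its page (0..31 here).
      std::uintptr_t region = (slot_address >> kRegionSizeLog2) &
                              (kPageAlignmentMask >> kRegionSizeLog2);
      // load, bts, store: set the dirty bit without a memory-operand bts.
      page->dirty_regions |= std::uint32_t{1} << region;
    }
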
@@ -155,144 +112,100 @@ void MacroAssembler::ClampUint8(Register reg) {
}
-void MacroAssembler::RecordWriteArray(Register object,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
- }
-
- // Array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
- // into an array of words.
- Register dst = index;
- lea(dst, Operand(object, index, times_half_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
-
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(index, Immediate(BitCast<int32_t>(kZapValue)));
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch,
+ Label::Distance branch_near) {
+ ASSERT(cc == equal || cc == not_equal);
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
+ mov(scratch, Operand(object));
+ // The mask isn't really an address. We load it as an external reference in
+ // case the size of the new space is different between the snapshot maker
+ // and the running system.
+ and_(Operand(scratch),
+ Immediate(ExternalReference::new_space_mask(isolate())));
+ cmp(Operand(scratch),
+ Immediate(ExternalReference::new_space_start(isolate())));
+ j(cc, branch, branch_near);
+ } else {
+ int32_t new_space_start = reinterpret_cast<int32_t>(
+ ExternalReference::new_space_start(isolate()).address());
+ lea(scratch, Operand(object, -new_space_start));
+ and_(scratch, isolate()->heap()->NewSpaceMask());
+ j(cc, branch, branch_near);
}
}
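
The non-serializer branch of the restored InNewSpace works because the new space occupies a single aligned block: subtracting its start and masking leaves zero exactly for pointers inside it, and the flags set by and_ drive the branch. A small sketch with assumed start and size values:

    #include <cstdint>

    // Assumed parameters: a 16 MB, 16 MB-aligned new space. The real values come
    // from ExternalReference::new_space_start() and the heap's new-space mask.
    constexpr std::uintptr_t kNewSpaceStart = 0x20000000;
    constexpr std::uintptr_t kNewSpaceMask  = ~(std::uintptr_t{16} * 1024 * 1024 - 1);

    bool InNewSpace(std::uintptr_t object) {
      // lea(scratch, Operand(object, -new_space_start)); and_(scratch, mask);
      // zero result <=> the pointer lies inside the new space block.
      return ((object - kNewSpaceStart) & kNewSpaceMask) == 0;
    }
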
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWrite(Register object,
+ int offset,
+ Register value,
+ Register scratch) {
// First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
+ // catch stores of Smis and stores into young gen.
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done, Label::kNear);
- }
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfSmi(value, &done, Label::kNear);
- // Although the object register is tagged, the offset is relative to the start
-  // of the object, so the offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ InNewSpace(object, value, equal, &done, Label::kNear);
- lea(dst, FieldOperand(object, offset));
- if (emit_debug_code()) {
- Label ok;
- test_b(dst, (1 << kPointerSizeLog2) - 1);
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
+ // The offset is relative to a tagged or untagged HeapObject pointer,
+ // so either offset or offset + kHeapObjectTag must be a
+ // multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize) ||
+ IsAligned(offset + kHeapObjectTag, kPointerSize));
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ Register dst = scratch;
+ if (offset != 0) {
+ lea(dst, Operand(object, offset));
+ } else {
+ // Array access: calculate the destination address in the same manner as
+ // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
+ // into an array of words.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ lea(dst, Operand(object, dst, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ }
+ RecordWriteHelper(object, dst, value);
bind(&done);
- // Clobber clobbered input registers when running with the debug-code flag
+ // Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
+ mov(object, Immediate(BitCast<int32_t>(kZapValue)));
mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
}
}
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
- if (emit_debug_code()) {
- AbortIfSmi(object);
- }
-
- if (remembered_set_action == OMIT_REMEMBERED_SET &&
- !FLAG_incremental_marking) {
- return;
- }
-
- if (FLAG_debug_code) {
- Label ok;
- cmp(value, Operand(address, 0));
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
+ Register value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
- // Skip barrier if writing a smi.
- JumpIfSmi(value, &done, Label::kNear);
- }
-
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
-
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
+ // Skip barrier if writing a smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfSmi(value, &done, Label::kNear);
+
+ InNewSpace(object, value, equal, &done);
+
+ RecordWriteHelper(object, address, value);
bind(&done);
- // Clobber clobbered registers when running with the debug-code flag
+ // Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
+ mov(object, Immediate(BitCast<int32_t>(kZapValue)));
mov(address, Immediate(BitCast<int32_t>(kZapValue)));
mov(value, Immediate(BitCast<int32_t>(kZapValue)));
}
@@ -311,7 +224,7 @@ void MacroAssembler::DebugBreak() {
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
- xor_(dst, dst); // Shorter than mov.
+ xor_(dst, Operand(dst)); // Shorter than mov.
} else {
mov(dst, x);
}
@@ -374,111 +287,13 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastElementValue);
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastSmiOnlyElementValue);
- j(below_equal, fail, distance);
+ STATIC_ASSERT(FAST_ELEMENTS == 0);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Map::kMaximumBitField2FastElementValue);
j(above, fail, distance);
}
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastSmiOnlyElementValue);
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
- Register maybe_number,
- Register elements,
- Register key,
- Register scratch1,
- XMMRegister scratch2,
- Label* fail,
- bool specialize_for_processor) {
- Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
- JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
- CheckMap(maybe_number,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
- cmp(FieldOperand(maybe_number, offset),
- Immediate(kNaNOrInfinityLowerBoundUpper32));
- j(greater_equal, &maybe_nan, Label::kNear);
-
- bind(&not_nan);
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope use_sse2(SSE2);
- movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
- scratch2);
- } else {
- fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
- }
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- j(greater, &is_nan, Label::kNear);
- cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
- j(zero, &not_nan);
- bind(&is_nan);
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope use_sse2(SSE2);
- movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
- } else {
- fld_d(Operand::StaticVariable(canonical_nan_reference));
- }
- jmp(&have_double_value, Label::kNear);
-
- bind(&smi_value);
- // Value is a smi. Convert to a double and store.
- // Preserve original value.
- mov(scratch1, maybe_number);
- SmiUntag(scratch1);
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope fscope(SSE2);
- cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
- scratch2);
- } else {
- push(scratch1);
- fild_s(Operand(esp, 0));
- pop(scratch1);
- fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
- }
- bind(&done);
-}
-
-
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
@@ -530,7 +345,7 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
Register scratch,
Label* fail) {
movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
cmp(scratch,
LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
j(above, fail);
@@ -587,7 +402,7 @@ void MacroAssembler::AbortIfSmi(Register object) {
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
- mov(ebp, esp);
+ mov(ebp, Operand(esp));
push(esi);
push(Immediate(Smi::FromInt(type)));
push(Immediate(CodeObject()));
@@ -614,7 +429,7 @@ void MacroAssembler::EnterExitFramePrologue() {
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(ebp);
- mov(ebp, esp);
+ mov(ebp, Operand(esp));
// Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
@@ -636,14 +451,14 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
if (save_doubles) {
CpuFeatures::Scope scope(SSE2);
int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
- sub(esp, Immediate(space));
+ sub(Operand(esp), Immediate(space));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
- sub(esp, Immediate(argc * kPointerSize));
+ sub(Operand(esp), Immediate(argc * kPointerSize));
}
// Get the required frame alignment for the OS.
@@ -663,7 +478,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
// Set up argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- mov(edi, eax);
+ mov(edi, Operand(eax));
lea(esi, Operand(ebp, eax, times_4, offset));
// Reserve space for argc, argv and isolate.
@@ -717,7 +532,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
void MacroAssembler::LeaveApiExitFrame() {
- mov(esp, ebp);
+ mov(esp, Operand(ebp));
pop(ebp);
LeaveExitFrameEpilogue();
@@ -765,7 +580,7 @@ void MacroAssembler::PopTryHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
isolate())));
- add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -797,7 +612,7 @@ void MacroAssembler::Throw(Register value) {
// (edx == ENTRY) == (ebp == 0) == (esi == 0), so we could test any
// of them.
Label skip;
- cmp(edx, Immediate(StackHandler::ENTRY));
+ cmp(Operand(edx), Immediate(StackHandler::ENTRY));
j(equal, &skip, Label::kNear);
mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
bind(&skip);
@@ -881,7 +696,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmp(scratch, Immediate(0));
+ cmp(Operand(scratch), Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
// Load the global context of the current context.
@@ -969,23 +784,23 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
mov(r1, r0);
not_(r0);
shl(r1, 15);
- add(r0, r1);
+ add(r0, Operand(r1));
// hash = hash ^ (hash >> 12);
mov(r1, r0);
shr(r1, 12);
- xor_(r0, r1);
+ xor_(r0, Operand(r1));
// hash = hash + (hash << 2);
lea(r0, Operand(r0, r0, times_4, 0));
// hash = hash ^ (hash >> 4);
mov(r1, r0);
shr(r1, 4);
- xor_(r0, r1);
+ xor_(r0, Operand(r1));
// hash = hash * 2057;
imul(r0, r0, 2057);
// hash = hash ^ (hash >> 16);
mov(r1, r0);
shr(r1, 16);
- xor_(r0, r1);
+ xor_(r0, Operand(r1));
// Compute capacity mask.
mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
@@ -999,9 +814,9 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
- add(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
+ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
}
- and_(r2, r1);
+ and_(r2, Operand(r1));
// Scale the index by multiplying by the entry size.
ASSERT(NumberDictionary::kEntrySize == 3);
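
The two hunks above are the NumberDictionary lookup: scramble the key with the integer hash spelled out in the comments, then probe by adding GetProbeOffset(i), masking with capacity - 1, and scaling by the 3-word entry size. A stand-alone sketch of the arithmetic (the probe-offset formula itself is not reproduced here):

    #include <cstdint>

    // Integer hash used by the generated code above; comments match the asm.
    std::uint32_t ComputeIntegerHash(std::uint32_t key) {
      std::uint32_t hash = key;
      hash = ~hash + (hash << 15);  // not_ / shl / add
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);    // lea(r0, [r0 + r0*4]) == hash * 5
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }

    // Probe i: add the dictionary's probe offset, mask by (capacity - 1), then
    // scale by the entry size (NumberDictionary::kEntrySize == 3).
    std::uint32_t ProbeEntryIndex(std::uint32_t hash, std::uint32_t capacity,
                                  std::uint32_t probe_offset) {
      std::uint32_t index = (hash + probe_offset) & (capacity - 1);
      return index * 3;
    }
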
@@ -1057,7 +872,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
if (scratch.is(no_reg)) {
mov(result, Operand::StaticVariable(new_space_allocation_top));
} else {
- mov(scratch, Immediate(new_space_allocation_top));
+ mov(Operand(scratch), Immediate(new_space_allocation_top));
mov(result, Operand(scratch, 0));
}
}
@@ -1116,7 +931,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
if (!top_reg.is(result)) {
mov(top_reg, result);
}
- add(top_reg, Immediate(object_size));
+ add(Operand(top_reg), Immediate(object_size));
j(carry, gc_required);
cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
@@ -1127,12 +942,12 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Tag result if requested.
if (top_reg.is(result)) {
if ((flags & TAG_OBJECT) != 0) {
- sub(result, Immediate(object_size - kHeapObjectTag));
+ sub(Operand(result), Immediate(object_size - kHeapObjectTag));
} else {
- sub(result, Immediate(object_size));
+ sub(Operand(result), Immediate(object_size));
}
} else if ((flags & TAG_OBJECT) != 0) {
- add(result, Immediate(kHeapObjectTag));
+ add(Operand(result), Immediate(kHeapObjectTag));
}
}
@@ -1170,7 +985,7 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
// We assume that element_count*element_size + header_size does not
// overflow.
lea(result_end, Operand(element_count, element_size, header_size));
- add(result_end, result);
+ add(result_end, Operand(result));
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
@@ -1215,7 +1030,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
if (!object_size.is(result_end)) {
mov(result_end, object_size);
}
- add(result_end, result);
+ add(result_end, Operand(result));
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
@@ -1235,7 +1050,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
+ and_(Operand(object), Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
cmp(object, Operand::StaticVariable(new_space_allocation_top));
Check(below, "Undo allocation of non allocated memory");
@@ -1274,7 +1089,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
// Allocate two byte string in new space.
AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
@@ -1308,8 +1123,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, length);
ASSERT(kCharSize == 1);
- add(scratch1, Immediate(kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ add(Operand(scratch1), Immediate(kObjectAlignmentMask));
+ and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
// Allocate ascii string in new space.
AllocateInNewSpace(SeqAsciiString::kHeaderSize,
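
Both string-allocation hunks above round the payload size up to the object alignment with the usual add-the-mask-then-clear-it idiom (the two-byte case first doubles the length via the lea). A one-line sketch, assuming 8-byte object alignment:

    #include <cstddef>

    constexpr std::size_t kObjectAlignmentMask = 8 - 1;  // assumed 8-byte alignment

    // (size + mask) & ~mask rounds up to the next multiple of the alignment.
    std::size_t AlignedSize(std::size_t payload_bytes) {
      return (payload_bytes + kObjectAlignmentMask) & ~kObjectAlignmentMask;
    }
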
@@ -1443,7 +1258,7 @@ void MacroAssembler::CopyBytes(Register source,
Register scratch) {
Label loop, done, short_string, short_loop;
// Experimentation shows that the short string loop is faster if length < 10.
- cmp(length, Immediate(10));
+ cmp(Operand(length), Immediate(10));
j(less_equal, &short_string);
ASSERT(source.is(esi));
@@ -1458,12 +1273,12 @@ void MacroAssembler::CopyBytes(Register source,
mov(scratch, ecx);
shr(ecx, 2);
rep_movs();
- and_(scratch, Immediate(0x3));
- add(destination, scratch);
+ and_(Operand(scratch), Immediate(0x3));
+ add(destination, Operand(scratch));
jmp(&done);
bind(&short_string);
- test(length, length);
+ test(length, Operand(length));
j(zero, &done);
bind(&short_loop);
@@ -1478,27 +1293,13 @@ void MacroAssembler::CopyBytes(Register source,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
- jmp(&entry);
- bind(&loop);
- mov(Operand(start_offset, 0), filler);
- add(start_offset, Immediate(kPointerSize));
- bind(&entry);
- cmp(start_offset, end_offset);
- j(less, &loop);
-}
-
-
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
Label ok;
- test(result, result);
+ test(result, Operand(result));
j(not_zero, &ok);
- test(op, op);
+ test(op, Operand(op));
j(sign, then_label);
bind(&ok);
}
@@ -1510,10 +1311,10 @@ void MacroAssembler::NegativeZeroTest(Register result,
Register scratch,
Label* then_label) {
Label ok;
- test(result, result);
+ test(result, Operand(result));
j(not_zero, &ok);
- mov(scratch, op1);
- or_(scratch, op2);
+ mov(scratch, Operand(op1));
+ or_(scratch, Operand(op2));
j(sign, then_label);
bind(&ok);
}
@@ -1543,7 +1344,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// If the prototype or initial map is the hole, don't return it and
// simply miss the cache instead. This will allow us to allocate a
// prototype object on-demand in the runtime system.
- cmp(result, Immediate(isolate()->factory()->the_hole_value()));
+ cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
j(equal, miss);
// If the function does not have an initial map, we're done.
@@ -1566,13 +1367,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -1583,12 +1384,13 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
+ ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
}
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -1604,15 +1406,9 @@ void MacroAssembler::StubReturn(int argc) {
}
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
- add(esp, Immediate(num_arguments * kPointerSize));
+ add(Operand(esp), Immediate(num_arguments * kPointerSize));
}
mov(eax, Immediate(isolate()->factory()->undefined_value()));
}
@@ -1646,7 +1442,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
Set(eax, Immediate(function->nargs));
mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, kSaveFPRegs);
+ CEntryStub ces(1);
+ ces.SaveDoubles();
CallStub(&ces);
}
@@ -1826,7 +1623,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
Label leave_exit_frame;
// Check if the result handle holds 0.
- test(eax, eax);
+ test(eax, Operand(eax));
j(zero, &empty_handle);
// It was non-zero. Dereference to get the result value.
mov(eax, Operand(eax, 0));
@@ -1867,7 +1664,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
mov(edi, eax);
mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
mov(eax, Immediate(delete_extensions));
- call(eax);
+ call(Operand(eax));
mov(eax, edi);
jmp(&leave_exit_frame);
@@ -1901,10 +1698,10 @@ void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
if (call_kind == CALL_AS_FUNCTION) {
// Set to some non-zero smi by updating the least significant
// byte.
- mov_b(dst, 1 << kSmiTagSize);
+ mov_b(Operand(dst), 1 << kSmiTagSize);
} else {
// Set to smi zero by clearing the register.
- xor_(dst, dst);
+ xor_(dst, Operand(dst));
}
}
@@ -1949,7 +1746,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmp(expected.reg(), actual.reg());
+ cmp(expected.reg(), Operand(actual.reg()));
j(equal, &invoke);
ASSERT(actual.reg().is(eax));
ASSERT(expected.reg().is(ebx));
@@ -1961,7 +1758,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
mov(edx, Immediate(code_constant));
- add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_operand.is_reg(edx)) {
mov(edx, code_operand);
}
@@ -1987,9 +1784,6 @@ void MacroAssembler::InvokeCode(const Operand& code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, flag, Label::kNear, call_wrapper,
@@ -2015,11 +1809,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
Label done;
- Operand dummy(eax, 0);
+ Operand dummy(eax);
InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
call_wrapper, call_kind);
if (flag == CALL_FUNCTION) {
@@ -2041,9 +1832,6 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
ASSERT(fun.is(edi));
mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -2061,9 +1849,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
ASSERT(function->is_compiled());
// Get the function and setup the context.
mov(edi, Immediate(Handle<JSFunction>(function)));
@@ -2087,8 +1872,8 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ // Calls are not allowed in some stubs.
+ ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
// Rely on the assertion to check that the number of provided
// arguments match the expected number of arguments. Fake a
@@ -2099,7 +1884,6 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
expected, expected, flag, call_wrapper, CALL_AS_METHOD);
}
-
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the JavaScript builtin function from the builtins object.
@@ -2109,7 +1893,6 @@ void MacroAssembler::GetBuiltinFunction(Register target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
-
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
ASSERT(!target.is(edi));
// Load the JavaScript builtin function from the builtins object.
@@ -2211,7 +1994,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
ret(bytes_dropped);
} else {
pop(scratch);
- add(esp, Immediate(bytes_dropped));
+ add(Operand(esp), Immediate(bytes_dropped));
push(scratch);
ret(0);
}
@@ -2222,7 +2005,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- add(esp, Immediate(stack_elements * kPointerSize));
+ add(Operand(esp), Immediate(stack_elements * kPointerSize));
}
}
@@ -2365,19 +2148,13 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ AllowStubCallsScope allow_scope(this, true);
push(eax);
push(Immediate(p0));
push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
- // Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
- } else {
- CallRuntime(Runtime::kAbort, 2);
- }
+ CallRuntime(Runtime::kAbort, 2);
// will not return here
int3();
}
@@ -2400,7 +2177,7 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst,
ASSERT(is_uintn(power + HeapNumber::kExponentBias,
HeapNumber::kExponentBits));
mov(scratch, Immediate(power + HeapNumber::kExponentBias));
- movd(dst, scratch);
+ movd(dst, Operand(scratch));
psllq(dst, HeapNumber::kMantissaBits);
}
@@ -2426,8 +2203,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
Label* failure) {
// Check that both objects are not smis.
STATIC_ASSERT(kSmiTag == 0);
- mov(scratch1, object1);
- and_(scratch1, object2);
+ mov(scratch1, Operand(object1));
+ and_(scratch1, Operand(object2));
JumpIfSmi(scratch1, failure);
// Load instance type for both strings.
@@ -2456,12 +2233,12 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
mov(scratch, esp);
- sub(esp, Immediate((num_arguments + 1) * kPointerSize));
+ sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
ASSERT(IsPowerOf2(frame_alignment));
and_(esp, -frame_alignment);
mov(Operand(esp, num_arguments * kPointerSize), scratch);
} else {
- sub(esp, Immediate(num_arguments * kPointerSize));
+ sub(Operand(esp), Immediate(num_arguments * kPointerSize));
}
}
@@ -2469,39 +2246,27 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
// Trashing eax is ok as it will be the return value.
- mov(eax, Immediate(function));
+ mov(Operand(eax), Immediate(function));
CallCFunction(eax, num_arguments);
}
void MacroAssembler::CallCFunction(Register function,
int num_arguments) {
- ASSERT(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
}
- call(function);
+ call(Operand(function));
if (OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));
} else {
- add(esp, Immediate(num_arguments * kPointerSize));
+ add(Operand(esp), Immediate(num_arguments * kPointerSize));
}
}
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
-}
-
-
CodePatcher::CodePatcher(byte* address, int size)
: address_(address),
size_(size),
@@ -2523,198 +2288,6 @@ CodePatcher::~CodePatcher() {
}
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- mov(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
- }
- if (mask < (1 << kBitsPerByte)) {
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
- static_cast<uint8_t>(mask));
- } else {
- test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
- }
- j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_near) {
- HasColor(object, scratch0, scratch1,
- on_black, on_black_near,
- 1, 0); // kBlackBitPattern.
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
- add(mask_scratch, mask_scratch); // Shift left 1 by adding.
- j(zero, &word_boundary, Label::kNear);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
- jmp(&other_color, Label::kNear);
-
- bind(&word_boundary);
- test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
-
- j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
- bind(&other_color);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
- mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- and_(bitmap_reg, addr_reg);
- mov(ecx, addr_reg);
- int shift =
- Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
- shr(ecx, shift);
- and_(ecx,
- (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
-
- add(bitmap_reg, ecx);
- mov(ecx, addr_reg);
- shr(ecx, kPointerSizeLog2);
- and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
- mov(mask_reg, Immediate(1));
- shl_cl(mask_reg);
-}
-
-
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label done;
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(not_zero, &done, Label::kNear);
-
- if (FLAG_debug_code) {
- // Check for impossible bit pattern.
- Label ok;
- push(mask_scratch);
- // shl. May overflow making the check conservative.
- add(mask_scratch, mask_scratch);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = ecx; // Holds map while checking type.
- Register length = ecx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- mov(map, FieldOperand(value, HeapObject::kMapOffset));
- cmp(map, FACTORY->heap_number_map());
- j(not_equal, &not_heap_number, Label::kNear);
- mov(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = ecx;
- movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- test_b(instance_type, kExternalStringTag);
- j(zero, &not_external, Label::kNear);
- mov(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either ASCII or UC16.
- ASSERT(kAsciiStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- add(length, Immediate(0x04));
- // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
- // by 2. If we multiply the string length as smi by this, it still
- // won't overflow a 32-bit value.
- ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
- ASSERT(SeqAsciiString::kMaxSize <=
- static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, 2 + kSmiTagSize + kSmiShiftSize);
- add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
- length);
- if (FLAG_debug_code) {
- mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
- Check(less_equal, "Live Bytes Count overflow chunk size");
- }
-
- bind(&done);
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index a1b42c280c..1906644c35 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -29,7 +29,6 @@
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#include "assembler.h"
-#include "frames.h"
#include "v8globals.h"
namespace v8 {
@@ -51,13 +50,6 @@ enum AllocationFlags {
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
-
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -69,130 +61,42 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, zero, branch, distance);
- }
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, not_zero, branch, distance);
- }
+ // For page containing |object| mark region covering |addr| dirty.
+ // RecordWriteHelper only works if the object is not in new
+ // space.
+ void RecordWriteHelper(Register object,
+ Register addr,
+ Register scratch);
- // Check if an object has a given incremental marking color. Also uses ecx!
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_distance = Label::kFar);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // Operand(reg, off).
- void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- save_fp,
- remembered_set_action,
- smi_check);
- }
+ // Check if object is in new space.
+ // scratch can be object itself, but it will be clobbered.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc, // equal for new space, not_equal otherwise.
+ Label* branch,
+ Label::Distance branch_near = Label::kFar);
- // Notify the garbage collector that we wrote a pointer into a fixed array.
- // |array| is the array being stored into, |value| is the
- // object being stored. |index| is the array index represented as a
- // Smi. All registers are clobbered by the operation RecordWriteArray
+ // For page containing |object| mark region covering [object+offset]
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. If offset is zero, then the scratch register
+ // contains the array index into the elements array represented as a
+ // Smi. All registers are clobbered by the operation. RecordWrite
// filters out smis so it does not update the write barrier if the
// value is a smi.
- void RecordWriteArray(
- Register array,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ void RecordWrite(Register object,
+ int offset,
+ Register value,
+ Register scratch);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
- // object being stored. The address and value registers are clobbered by the
+ // object being stored. All registers are clobbered by the
// operation. RecordWrite filters out smis so it does not update the
// write barrier if the value is a smi.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ void RecordWrite(Register object,
+ Register address,
+ Register value);
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
@@ -201,6 +105,15 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
+ // ---------------------------------------------------------------------------
+ // Activation frames
+
+ void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+ void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+ void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+ void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
@@ -246,15 +159,6 @@ class MacroAssembler: public Assembler {
void SetCallKind(Register dst, CallKind kind);
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
- }
-
void InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
@@ -321,29 +225,6 @@ class MacroAssembler: public Assembler {
Label* fail,
Label::Distance distance = Label::kFar);
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiOnlyElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
- void StoreNumberToDoubleElements(Register maybe_number,
- Register elements,
- Register key,
- Register scratch1,
- XMMRegister scratch2,
- Label* fail,
- bool specialize_for_processor);
-
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object)
@@ -396,7 +277,7 @@ class MacroAssembler: public Assembler {
void SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- add(reg, reg);
+ add(reg, Operand(reg));
}
void SmiUntag(Register reg) {
sar(reg, kSmiTagSize);
@@ -584,13 +465,6 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -793,9 +667,6 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
// String utilities.
@@ -819,14 +690,9 @@ class MacroAssembler: public Assembler {
return SafepointRegisterStackIndex(reg.code());
}
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
private:
bool generating_stub_;
bool allow_stub_calls_;
- bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@@ -837,10 +703,14 @@ class MacroAssembler: public Assembler {
const Operand& code_operand,
Label* done,
InvokeFlag flag,
- Label::Distance done_distance,
+ Label::Distance done_near = Label::kFar,
const CallWrapper& call_wrapper = NullCallWrapper(),
CallKind call_kind = CALL_AS_METHOD);
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
@@ -859,20 +729,6 @@ class MacroAssembler: public Assembler {
Register scratch,
bool gc_allowed);
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Uses ecx as scratch and leaves addr_reg
- // unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 8b0b9ab911..d175d9e036 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -134,7 +134,7 @@ int RegExpMacroAssemblerIA32::stack_limit_slack() {
void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
if (by != 0) {
- __ add(edi, Immediate(by * char_size()));
+ __ add(Operand(edi), Immediate(by * char_size()));
}
}
@@ -152,8 +152,8 @@ void RegExpMacroAssemblerIA32::Backtrack() {
CheckPreemption();
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(ebx);
- __ add(ebx, Immediate(masm_->CodeObject()));
- __ jmp(ebx);
+ __ add(Operand(ebx), Immediate(masm_->CodeObject()));
+ __ jmp(Operand(ebx));
}
@@ -219,7 +219,7 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
int byte_offset = cp_offset * char_size();
if (check_end_of_string) {
// Check that there are at least str.length() characters left in the input.
- __ cmp(edi, Immediate(-(byte_offset + byte_length)));
+ __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length)));
BranchOrBacktrack(greater, on_failure);
}
@@ -288,7 +288,7 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
Label fallthrough;
__ cmp(edi, Operand(backtrack_stackpointer(), 0));
__ j(not_equal, &fallthrough);
- __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop.
+ __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); // Pop.
BranchOrBacktrack(no_condition, on_equal);
__ bind(&fallthrough);
}
@@ -300,7 +300,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
Label fallthrough;
__ mov(edx, register_location(start_reg)); // Index of start of capture
__ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
- __ sub(ebx, edx); // Length of capture.
+ __ sub(ebx, Operand(edx)); // Length of capture.
// The length of a capture should not be negative. This can only happen
// if the end of the capture is unrecorded, or at a point earlier than
@@ -320,9 +320,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ push(backtrack_stackpointer());
// After this, the eax, ecx, and edi registers are available.
- __ add(edx, esi); // Start of capture
- __ add(edi, esi); // Start of text to match against capture.
- __ add(ebx, edi); // End of text to match against capture.
+ __ add(edx, Operand(esi)); // Start of capture
+ __ add(edi, Operand(esi)); // Start of text to match against capture.
+ __ add(ebx, Operand(edi)); // End of text to match against capture.
Label loop;
__ bind(&loop);
@@ -339,15 +339,15 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ movzx_b(ecx, Operand(edx, 0));
__ or_(ecx, 0x20);
- __ cmp(eax, ecx);
+ __ cmp(eax, Operand(ecx));
__ j(not_equal, &fail);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
- __ add(edx, Immediate(1));
- __ add(edi, Immediate(1));
+ __ add(Operand(edx), Immediate(1));
+ __ add(Operand(edi), Immediate(1));
// Compare to end of match, and loop if not done.
- __ cmp(edi, ebx);
+ __ cmp(edi, Operand(ebx));
__ j(below, &loop);
__ jmp(&success);
@@ -361,9 +361,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Restore original value before continuing.
__ pop(backtrack_stackpointer());
// Drop original value of character position.
- __ add(esp, Immediate(kPointerSize));
+ __ add(Operand(esp), Immediate(kPointerSize));
// Compute new value of character position after the matched part.
- __ sub(edi, esi);
+ __ sub(edi, Operand(esi));
} else {
ASSERT(mode_ == UC16);
// Save registers before calling C function.
@@ -389,19 +389,16 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Set byte_offset2.
// Found by adding negative string-end offset of current position (edi)
// to end of string.
- __ add(edi, esi);
+ __ add(edi, Operand(esi));
__ mov(Operand(esp, 1 * kPointerSize), edi);
// Set byte_offset1.
// Start of capture, where edx already holds string-end negative offset.
- __ add(edx, esi);
+ __ add(edx, Operand(esi));
__ mov(Operand(esp, 0 * kPointerSize), edx);
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(compare, argument_count);
- }
+ ExternalReference compare =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(compare, argument_count);
// Pop original values before reacting on result value.
__ pop(ebx);
__ pop(backtrack_stackpointer());
@@ -409,10 +406,10 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ pop(esi);
// Check if function returned non-zero for success or zero for failure.
- __ or_(eax, eax);
+ __ or_(eax, Operand(eax));
BranchOrBacktrack(zero, on_no_match);
// On success, increment position by length of capture.
- __ add(edi, ebx);
+ __ add(edi, Operand(ebx));
}
__ bind(&fallthrough);
}
@@ -428,7 +425,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
// Find length of back-referenced capture.
__ mov(edx, register_location(start_reg));
__ mov(eax, register_location(start_reg + 1));
- __ sub(eax, edx); // Length to check.
+ __ sub(eax, Operand(edx)); // Length to check.
// Fail on partial or illegal capture (start of capture after end of capture).
BranchOrBacktrack(less, on_no_match);
// Succeed on empty capture (including no capture)
@@ -436,7 +433,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
// Check that there are sufficient characters left in the input.
__ mov(ebx, edi);
- __ add(ebx, eax);
+ __ add(ebx, Operand(eax));
BranchOrBacktrack(greater, on_no_match);
// Save register to make it available below.
@@ -444,7 +441,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
// Compute pointers to match string and capture string
__ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
- __ add(edx, esi); // Start of capture.
+ __ add(edx, Operand(esi)); // Start of capture.
__ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
Label loop;
@@ -459,10 +456,10 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
}
__ j(not_equal, &fail);
// Increment pointers into capture and match string.
- __ add(edx, Immediate(char_size()));
- __ add(ebx, Immediate(char_size()));
+ __ add(Operand(edx), Immediate(char_size()));
+ __ add(Operand(ebx), Immediate(char_size()));
// Check if we have reached end of match area.
- __ cmp(ebx, ecx);
+ __ cmp(ebx, Operand(ecx));
__ j(below, &loop);
__ jmp(&success);
@@ -474,7 +471,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
__ bind(&success);
// Move current character position to position after match.
__ mov(edi, ecx);
- __ sub(edi, esi);
+ __ sub(Operand(edi), esi);
// Restore backtrack stackpointer.
__ pop(backtrack_stackpointer());
@@ -577,17 +574,17 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ mov(eax, current_character());
- __ xor_(eax, Immediate(0x01));
+ __ mov(Operand(eax), current_character());
+ __ xor_(Operand(eax), Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(eax, Immediate(0x0b));
+ __ sub(Operand(eax), Immediate(0x0b));
__ cmp(eax, 0x0c - 0x0b);
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
+ __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
__ cmp(eax, 0x2029 - 0x2028);
BranchOrBacktrack(below_equal, on_no_match);
}
@@ -596,7 +593,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
case 'w': {
if (mode_ != ASCII) {
// Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Immediate('z'));
+ __ cmp(Operand(current_character()), Immediate('z'));
BranchOrBacktrack(above, on_no_match);
}
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
@@ -610,7 +607,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
Label done;
if (mode_ != ASCII) {
// Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Immediate('z'));
+ __ cmp(Operand(current_character()), Immediate('z'));
__ j(above, &done);
}
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
@@ -630,10 +627,10 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
// The opposite of '.'.
- __ mov(eax, current_character());
- __ xor_(eax, Immediate(0x01));
+ __ mov(Operand(eax), current_character());
+ __ xor_(Operand(eax), Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(eax, Immediate(0x0b));
+ __ sub(Operand(eax), Immediate(0x0b));
__ cmp(eax, 0x0c - 0x0b);
if (mode_ == ASCII) {
BranchOrBacktrack(above, on_no_match);
@@ -644,7 +641,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
+ __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
__ cmp(eax, 1);
BranchOrBacktrack(above, on_no_match);
__ bind(&done);
@@ -671,12 +668,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
+ // Start new stack frame.
__ push(ebp);
__ mov(ebp, esp);
// Save callee-save registers. Order here should correspond to order of
@@ -707,7 +699,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
CallCheckStackGuardState(ebx);
- __ or_(eax, eax);
+ __ or_(eax, Operand(eax));
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &exit_label_);
@@ -716,13 +708,13 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(ebx, Operand(ebp, kStartIndex));
// Allocate space on stack for registers.
- __ sub(esp, Immediate(num_registers_ * kPointerSize));
+ __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
// Load string length.
__ mov(esi, Operand(ebp, kInputEnd));
// Load input position.
__ mov(edi, Operand(ebp, kInputStart));
// Set up edi to be negative offset from string end.
- __ sub(edi, esi);
+ __ sub(edi, Operand(esi));
// Set eax to address of char before start of the string.
// (effectively string position -1).
@@ -744,7 +736,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label init_loop;
__ bind(&init_loop);
__ mov(Operand(ebp, ecx, times_1, +0), eax);
- __ sub(ecx, Immediate(kPointerSize));
+ __ sub(Operand(ecx), Immediate(kPointerSize));
__ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
__ j(greater, &init_loop);
}
@@ -785,12 +777,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
if (mode_ == UC16) {
__ lea(ecx, Operand(ecx, edx, times_2, 0));
} else {
- __ add(ecx, edx);
+ __ add(ecx, Operand(edx));
}
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(eax, register_location(i));
// Convert to index from start of string, not end.
- __ add(eax, ecx);
+ __ add(eax, Operand(ecx));
if (mode_ == UC16) {
__ sar(eax, 1); // Convert byte index to character index.
}
@@ -827,7 +819,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(edi);
CallCheckStackGuardState(ebx);
- __ or_(eax, eax);
+ __ or_(eax, Operand(eax));
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &exit_label_);
@@ -862,7 +854,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ or_(eax, eax);
+ __ or_(eax, Operand(eax));
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), eax);
@@ -1191,8 +1183,8 @@ void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
void RegExpMacroAssemblerIA32::SafeReturn() {
__ pop(ebx);
- __ add(ebx, Immediate(masm_->CodeObject()));
- __ jmp(ebx);
+ __ add(Operand(ebx), Immediate(masm_->CodeObject()));
+ __ jmp(Operand(ebx));
}
@@ -1204,14 +1196,14 @@ void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerIA32::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
// Notice: This updates flags, unlike normal Push.
- __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
+ __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
__ mov(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerIA32::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
- __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
+ __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
__ mov(Operand(backtrack_stackpointer(), 0), value);
}
@@ -1220,7 +1212,7 @@ void RegExpMacroAssemblerIA32::Pop(Register target) {
ASSERT(!target.is(backtrack_stackpointer()));
__ mov(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
- __ add(backtrack_stackpointer(), Immediate(kPointerSize));
+ __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
}
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 07cb14d025..ab62764e64 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -66,8 +66,8 @@ static void ProbeTable(Isolate* isolate,
__ j(not_equal, &miss);
// Jump to the first instruction in the code stub.
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
+ __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(Operand(extra));
__ bind(&miss);
} else {
@@ -92,8 +92,8 @@ static void ProbeTable(Isolate* isolate,
__ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
// Jump to the first instruction in the code stub.
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
+ __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(Operand(offset));
// Pop at miss.
__ bind(&miss);
@@ -204,8 +204,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
- __ sub(scratch, name);
- __ add(scratch, Immediate(flags));
+ __ sub(scratch, Operand(name));
+ __ add(Operand(scratch), Immediate(flags));
__ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
// Probe the secondary table.
@@ -318,7 +318,7 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register scratch2,
Label* miss_label) {
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(eax, scratch1);
+ __ mov(eax, Operand(scratch1));
__ ret(0);
}
@@ -406,7 +406,7 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// frame.
// -----------------------------------
__ pop(scratch);
- __ add(esp, Immediate(kPointerSize * kFastApiCallArguments));
+ __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments));
__ push(scratch);
}
@@ -462,7 +462,7 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
__ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
__ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_.
- __ add(eax, Immediate(argc * kPointerSize));
+ __ add(Operand(eax), Immediate(argc * kPointerSize));
__ mov(ApiParameterOperand(2), eax); // v8::Arguments::values_.
__ Set(ApiParameterOperand(3), Immediate(argc)); // v8::Arguments::length_.
// v8::Arguments::is_construct_call_.
@@ -651,7 +651,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
scratch1, scratch2, scratch3, name,
miss_label);
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
// Save the name_ register across the call.
__ push(name_);
@@ -668,8 +668,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Restore the name_ register.
__ pop(name_);
-
- // Leave the internal frame.
+ __ LeaveInternalFrame();
}
void LoadWithInterceptor(MacroAssembler* masm,
@@ -677,21 +676,19 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register holder,
JSObject* holder_obj,
Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- // Leave the internal frame.
- }
+ __ EnterInternalFrame();
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ __ LeaveInternalFrame();
__ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
__ j(not_equal, interceptor_succeeded);
@@ -789,12 +786,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, eax);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch,
- kDontSaveFPRegs);
+ __ mov(name_reg, Operand(eax));
+ __ RecordWrite(receiver_reg, offset, name_reg, scratch);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -804,12 +797,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, eax);
- __ RecordWriteField(scratch,
- offset,
- name_reg,
- receiver_reg,
- kDontSaveFPRegs);
+ __ mov(name_reg, Operand(eax));
+ __ RecordWrite(scratch, offset, name_reg, receiver_reg);
}
// Return the value (register eax).
@@ -943,7 +932,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
} else if (heap()->InNewSpace(prototype)) {
// Get the map of the current object.
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Immediate(Handle<Map>(current->map())));
+ __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
// Branch on the result of the map check.
__ j(not_equal, miss);
// Check access rights to the global object. This has to happen
@@ -1064,7 +1053,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ pop(scratch3); // Get return address to place it below.
__ push(receiver); // receiver
- __ mov(scratch2, esp);
+ __ mov(scratch2, Operand(esp));
ASSERT(!scratch2.is(reg));
__ push(reg); // holder
// Push data from AccessorInfo.
@@ -1095,7 +1084,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ PrepareCallApiFunction(kApiArgc);
__ mov(ApiParameterOperand(0), ebx); // name.
- __ add(ebx, Immediate(kPointerSize));
+ __ add(Operand(ebx), Immediate(kPointerSize));
__ mov(ApiParameterOperand(1), ebx); // arguments pointer.
// Emitting a stub call may try to allocate (if the code is not
@@ -1169,43 +1158,41 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ push(receiver);
- }
- __ push(holder_reg);
- __ push(name_reg);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ cmp(eax, factory()->no_interceptor_result_sentinel());
- __ j(equal, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ push(receiver);
+ }
+ __ push(holder_reg);
+ __ push(name_reg);
+
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method.)
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ cmp(eax, factory()->no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ ret(0);
- // Leave the internal frame.
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
}
+ __ LeaveInternalFrame();
+
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into holder_reg.
if (interceptor_holder != lookup->holder()) {
@@ -1272,7 +1259,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(ecx, Immediate(Handle<String>(name)));
+ __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
__ j(not_equal, miss);
}
}
@@ -1329,7 +1316,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
Immediate(Handle<SharedFunctionInfo>(function->shared())));
__ j(not_equal, miss);
} else {
- __ cmp(edi, Immediate(Handle<JSFunction>(function)));
+ __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
__ j(not_equal, miss);
}
}
@@ -1454,25 +1441,21 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier;
+ Label exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- __ add(eax, Immediate(Smi::FromInt(argc)));
+ __ add(Operand(eax), Immediate(Smi::FromInt(argc)));
// Get the element's length into ecx.
__ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
- __ cmp(eax, ecx);
+ __ cmp(eax, Operand(ecx));
__ j(greater, &attempt_to_grow_elements);
- // Check if value is a smi.
- __ mov(ecx, Operand(esp, argc * kPointerSize));
- __ JumpIfNotSmi(ecx, &with_write_barrier);
-
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
@@ -1480,27 +1463,20 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ lea(edx, FieldOperand(ebx,
eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
+ __ mov(ecx, Operand(esp, argc * kPointerSize));
__ mov(Operand(edx, 0), ecx);
+ // Check if value is a smi.
+ __ JumpIfNotSmi(ecx, &with_write_barrier);
+
+ __ bind(&exit);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_write_barrier);
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(edi, &call_builtin);
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- // Push the element.
- __ lea(edx, FieldOperand(ebx,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ mov(Operand(edx, 0), ecx);
-
- __ RecordWrite(
- ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ InNewSpace(ebx, ecx, equal, &exit);
+ __ RecordWriteHelper(ebx, edx, ecx);
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
@@ -1508,19 +1484,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ jmp(&call_builtin);
}
- __ mov(edi, Operand(esp, argc * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(edi, &no_fast_elements_check);
- __ mov(esi, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(esi, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
- // We could be lucky and the elements array could be at the top of
- // new-space. In this case we can just grow it in place by moving the
- // allocation pointer up.
-
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
@@ -1534,43 +1497,33 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ lea(edx, FieldOperand(ebx,
eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmp(edx, ecx);
+ __ cmp(edx, Operand(ecx));
__ j(not_equal, &call_builtin);
- __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
+ __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
__ j(above, &call_builtin);
// We fit and could grow elements.
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
+ __ mov(ecx, Operand(esp, argc * kPointerSize));
// Push the argument...
- __ mov(Operand(edx, 0), edi);
+ __ mov(Operand(edx, 0), ecx);
// ... and fill the rest with holes.
for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize),
Immediate(factory()->the_hole_value()));
}
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it so we may have to
- // tell the incremental marker to rescan the object that we just grew. We
- // don't need to worry about the holes because they are in old space and
- // already marked black.
- __ RecordWrite(ebx, edx, edi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
// Restore receiver to edx as finish sequence assumes it's here.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Increment element's and array's sizes.
__ add(FieldOperand(ebx, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(kAllocationDelta)));
-
- // NOTE: This only happen in new-space, where we don't
- // care about the black-byte-count on pages. Otherwise we should
- // update that too if the object is black.
-
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+ // Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
}
@@ -1632,7 +1585,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// Get the array's length into ecx and calculate new length.
__ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
- __ sub(ecx, Immediate(Smi::FromInt(1)));
+ __ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
__ j(negative, &return_undefined);
// Get the last element.
@@ -1641,7 +1594,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ mov(eax, FieldOperand(ebx,
ecx, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(eax, Immediate(factory()->the_hole_value()));
+ __ cmp(Operand(eax), Immediate(factory()->the_hole_value()));
__ j(equal, &call_builtin);
// Set the array's length.
@@ -2105,10 +2058,10 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
__ sar(ebx, kBitsPerInt - 1);
// Do bitwise not or do nothing depending on ebx.
- __ xor_(eax, ebx);
+ __ xor_(eax, Operand(ebx));
// Add 1 or do nothing depending on ebx.
- __ sub(eax, ebx);
+ __ sub(eax, Operand(ebx));
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
@@ -2191,7 +2144,7 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
// Allocate space for v8::Arguments implicit values. Must be initialized
// before calling any runtime function.
- __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
+ __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
// Check that the maps haven't changed and find a Holder as a side effect.
CheckPrototypes(JSObject::cast(object), edx, holder,
@@ -2207,7 +2160,7 @@ MaybeObject* CallStubCompiler::CompileFastApiCall(
if (result->IsFailure()) return result;
__ bind(&miss);
- __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
+ __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
__ bind(&miss_before_stack_reserved);
MaybeObject* maybe_result = GenerateMissBranch();
@@ -2646,9 +2599,13 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss);
+
// Compute the cell operand to use.
- __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
- Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
+ Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
+ if (Serializer::enabled()) {
+ __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+ cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
+ }
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
@@ -2659,23 +2616,8 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ mov(cell_operand, eax);
- Label done;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done);
-
- __ mov(ecx, eax);
- __ lea(edx, cell_operand);
- // Cells are always in the remembered set.
- __ RecordWrite(ebx, // Object.
- edx, // Address.
- ecx, // Value.
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
// Return the value (register eax).
- __ bind(&done);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
@@ -2707,7 +2649,7 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ IncrementCounter(counters->keyed_store_field(), 1);
// Check that the name has not changed.
- __ cmp(ecx, Immediate(Handle<String>(name)));
+ __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
// Generate store field code. Trashes the name register.
@@ -2755,10 +2697,9 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
}
-MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
MapList* receiver_maps,
- CodeList* handler_stubs,
- MapList* transitioned_maps) {
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -2766,21 +2707,15 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
// -- esp[0] : return address
// -----------------------------------
Label miss;
- __ JumpIfSmi(edx, &miss, Label::kNear);
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // ebx: receiver->map().
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> map(receiver_maps->at(i));
- __ cmp(edi, map);
- if (transitioned_maps->at(i) == NULL) {
- __ j(equal, Handle<Code>(handler_stubs->at(i)));
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- __ mov(ebx, Immediate(Handle<Map>(transitioned_maps->at(i))));
- __ jmp(Handle<Code>(handler_stubs->at(i)), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
+ __ JumpIfSmi(edx, &miss);
+
+ Register map_reg = ebx;
+ __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
+ int receiver_count = receiver_maps->length();
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ __ cmp(map_reg, map);
+ __ j(equal, Handle<Code>(handler_ics->at(current)));
}
__ bind(&miss);
Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
@@ -3006,7 +2941,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
__ IncrementCounter(counters->keyed_load_field(), 1);
// Check that the name has not changed.
- __ cmp(eax, Immediate(Handle<String>(name)));
+ __ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
@@ -3036,7 +2971,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
__ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
- __ cmp(eax, Immediate(Handle<String>(name)));
+ __ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
@@ -3071,7 +3006,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
__ IncrementCounter(counters->keyed_load_constant_function(), 1);
// Check that the name has not changed.
- __ cmp(eax, Immediate(Handle<String>(name)));
+ __ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
@@ -3099,7 +3034,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
- __ cmp(eax, Immediate(Handle<String>(name)));
+ __ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
LookupResult lookup;
@@ -3135,7 +3070,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
__ IncrementCounter(counters->keyed_load_array_length(), 1);
// Check that the name has not changed.
- __ cmp(eax, Immediate(Handle<String>(name)));
+ __ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
GenerateLoadArrayLength(masm(), edx, ecx, &miss);
@@ -3160,7 +3095,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ IncrementCounter(counters->keyed_load_string_length(), 1);
// Check that the name has not changed.
- __ cmp(eax, Immediate(Handle<String>(name)));
+ __ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
@@ -3185,7 +3120,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
__ IncrementCounter(counters->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
- __ cmp(eax, Immediate(Handle<String>(name)));
+ __ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
@@ -3220,7 +3155,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic(
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
MapList* receiver_maps,
CodeList* handler_ics) {
// ----------- S t a t e -------------
@@ -3363,7 +3298,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// Move argc to ebx and retrieve and tag the JSObject to return.
__ mov(ebx, eax);
__ pop(eax);
- __ or_(eax, Immediate(kHeapObjectTag));
+ __ or_(Operand(eax), Immediate(kHeapObjectTag));
// Remove caller arguments and receiver from the stack and return.
__ pop(ecx);
@@ -3744,10 +3679,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// If the value is NaN or +/-infinity, the result is 0x80000000,
// which is automatically zero when taken mod 2^n, n < 32.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ sub(esp, Immediate(2 * kPointerSize));
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
__ fisttp_d(Operand(esp, 0));
__ pop(ebx);
- __ add(esp, Immediate(kPointerSize));
+ __ add(Operand(esp), Immediate(kPointerSize));
} else {
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
@@ -3903,17 +3838,15 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -3937,28 +3870,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ j(above_equal, &miss_force_generic);
}
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(eax, &transition_elements_kind);
- // ecx is a smi, use times_half_pointer_size instead of
- // times_pointer_size
- __ mov(FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize), eax);
- } else {
- ASSERT(elements_kind == FAST_ELEMENTS);
- // Do the store and update the write barrier.
- // ecx is a smi, use times_half_pointer_size instead of
- // times_pointer_size
- __ lea(ecx, FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Make sure to preserve the value in register eax.
- __ mov(edx, eax);
- __ RecordWrite(edi, ecx, edx, kDontSaveFPRegs);
- }
+ // Do the store and update the write barrier. Make sure to preserve
+ // the value in register eax.
+ __ mov(edx, Operand(eax));
+ __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
+ __ RecordWrite(edi, 0, edx, ecx);
// Done.
__ ret(0);
@@ -3968,11 +3884,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
}
@@ -3985,7 +3896,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, smi_value, is_nan, maybe_nan;
+ Label have_double_value, not_nan;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -4006,13 +3918,59 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
}
__ j(above_equal, &miss_force_generic);
- __ StoreNumberToDoubleElements(eax,
- edi,
- ecx,
- edx,
- xmm0,
- &transition_elements_kind,
- true);
+ __ JumpIfSmi(eax, &smi_value, Label::kNear);
+
+ __ CheckMap(eax,
+ masm->isolate()->factory()->heap_number_map(),
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ // Double value, canonicalize NaN.
+ uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32));
+ __ j(greater_equal, &maybe_nan, Label::kNear);
+
+ __ bind(&not_nan);
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ bind(&have_double_value);
+ __ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize),
+ xmm0);
+ __ ret(0);
+ } else {
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ bind(&have_double_value);
+ __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+ __ ret(0);
+ }
+
+ __ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ __ j(greater, &is_nan, Label::kNear);
+ __ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0));
+ __ j(zero, &not_nan);
+ __ bind(&is_nan);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference));
+ } else {
+ __ fld_d(Operand::StaticVariable(canonical_nan_reference));
+ }
+ __ jmp(&have_double_value, Label::kNear);
+
+ __ bind(&smi_value);
+ // Value is a smi. Convert to a double and store.
+ // Preserve original value.
+ __ mov(edx, eax);
+ __ SmiUntag(edx);
+ __ push(edx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(edx);
+ __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
@@ -4020,11 +3978,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index 498cf3af31..b4f789cb44 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -87,8 +87,6 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
}
#endif
Assembler::set_target_address_at(address, target->instruction_start());
- target->GetHeap()->incremental_marking()->RecordCodeTargetPatch(address,
- target);
}
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index d5056a9ce8..0f76a9a06c 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -167,7 +167,7 @@ static bool HasNormalObjectsInPrototypeChain(Isolate* isolate,
LookupResult* lookup,
Object* receiver) {
Object* end = lookup->IsProperty()
- ? lookup->holder() : Object::cast(isolate->heap()->null_value());
+ ? lookup->holder() : isolate->heap()->null_value();
for (Object* current = receiver;
current != end;
current = current->GetPrototype()) {
@@ -1084,22 +1084,14 @@ MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck(
}
-MaybeObject* KeyedLoadIC::ComputePolymorphicStub(
+MaybeObject* KeyedLoadIC::ConstructMegamorphicStub(
MapList* receiver_maps,
+ CodeList* targets,
StrictModeFlag strict_mode) {
- CodeList handler_ics(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Map* receiver_map(receiver_maps->at(i));
- MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
- receiver_map, strict_mode);
- Code* cached_stub;
- if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
- handler_ics.Add(cached_stub);
- }
Object* object;
KeyedLoadStubCompiler compiler;
- MaybeObject* maybe_code = compiler.CompileLoadPolymorphic(receiver_maps,
- &handler_ics);
+ MaybeObject* maybe_code = compiler.CompileLoadMegamorphic(receiver_maps,
+ targets);
if (!maybe_code->ToObject(&object)) return maybe_code;
isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
PROFILE(isolate(), CodeCreateEvent(
@@ -1251,7 +1243,7 @@ MaybeObject* KeyedLoadIC::Load(State state,
stub = indexed_interceptor_stub();
} else if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
MaybeObject* maybe_stub = ComputeStub(receiver,
- LOAD,
+ false,
kNonStrictMode,
stub);
stub = maybe_stub->IsFailure() ?
@@ -1359,7 +1351,7 @@ static bool StoreICableLookup(LookupResult* lookup) {
}
-static bool LookupForWrite(JSObject* receiver,
+static bool LookupForWrite(JSReceiver* receiver,
String* name,
LookupResult* lookup) {
receiver->LocalLookup(name, lookup);
@@ -1367,10 +1359,12 @@ static bool LookupForWrite(JSObject* receiver,
return false;
}
- if (lookup->type() == INTERCEPTOR &&
- receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
- receiver->LocalLookupRealNamedProperty(name, lookup);
- return StoreICableLookup(lookup);
+ if (lookup->type() == INTERCEPTOR) {
+ JSObject* object = JSObject::cast(receiver);
+ if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
+ object->LocalLookupRealNamedProperty(name, lookup);
+ return StoreICableLookup(lookup);
+ }
}
return true;
@@ -1382,28 +1376,28 @@ MaybeObject* StoreIC::Store(State state,
Handle<Object> object,
Handle<String> name,
Handle<Object> value) {
- if (!object->IsJSObject()) {
- // Handle proxies.
- if (object->IsJSProxy()) {
- return JSProxy::cast(*object)->
- SetProperty(*name, *value, NONE, strict_mode);
- }
-
- // If the object is undefined or null it's illegal to try to set any
- // properties on it; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_store", object, name);
- }
+ // If the object is undefined or null it's illegal to try to set any
+ // properties on it; throw a TypeError in that case.
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_store", object, name);
+ }
+ if (!object->IsJSReceiver()) {
// The length property of string values is read-only. Throw in strict mode.
if (strict_mode == kStrictMode && object->IsString() &&
name->Equals(isolate()->heap()->length_symbol())) {
return TypeError("strict_read_only_property", object, name);
}
- // Ignore other stores where the receiver is not a JSObject.
+ // Ignore stores where the receiver is not a JSObject.
return *value;
}
+ // Handle proxies.
+ if (object->IsJSProxy()) {
+ return JSReceiver::cast(*object)->
+ SetProperty(*name, *value, NONE, strict_mode);
+ }
+
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
// Check if the given name is an array index.
@@ -1601,15 +1595,14 @@ void KeyedIC::GetReceiverMapsForStub(Code* stub, MapList* result) {
MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
- StubKind stub_kind,
+ bool is_store,
StrictModeFlag strict_mode,
Code* generic_stub) {
State ic_state = target()->ic_state();
- if ((ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) &&
- !IsTransitionStubKind(stub_kind)) {
+ if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
Code* monomorphic_stub;
MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
- stub_kind,
+ is_store,
strict_mode,
generic_stub);
if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
@@ -1628,21 +1621,9 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
// Determine the list of receiver maps that this call site has seen,
// adding the map that was just encountered.
MapList target_receiver_maps;
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
- target_receiver_maps.Add(receiver->map());
- } else {
- GetReceiverMapsForStub(target(), &target_receiver_maps);
- }
- bool map_added =
- AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map());
- if (IsTransitionStubKind(stub_kind)) {
- MaybeObject* maybe_map = ComputeTransitionedMap(receiver, stub_kind);
- Map* new_map = NULL;
- if (!maybe_map->To(&new_map)) return maybe_map;
- map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, new_map);
- }
- if (!map_added) {
- // If the miss wasn't due to an unseen map, a polymorphic stub
+ GetReceiverMapsForStub(target(), &target_receiver_maps);
+ if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map())) {
+ // If the miss wasn't due to an unseen map, a MEGAMORPHIC stub
// won't help, use the generic stub.
return generic_stub;
}
@@ -1663,9 +1644,21 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
ASSERT(maybe_cached_stub->IsCode());
return Code::cast(maybe_cached_stub);
}
- MaybeObject* maybe_stub =
- ComputePolymorphicStub(&target_receiver_maps, strict_mode);
+ // Collect MONOMORPHIC stubs for all target_receiver_maps.
+ CodeList handler_ics(target_receiver_maps.length());
+ for (int i = 0; i < target_receiver_maps.length(); ++i) {
+ Map* receiver_map(target_receiver_maps.at(i));
+ MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
+ receiver_map, strict_mode);
+ Code* cached_stub;
+ if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
+ handler_ics.Add(cached_stub);
+ }
+ // Build the MEGAMORPHIC stub.
Code* stub;
+ MaybeObject* maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
+ &handler_ics,
+ strict_mode);
if (!maybe_stub->To(&stub)) return maybe_stub;
MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub);
if (maybe_update->IsFailure()) return maybe_update;
@@ -1682,7 +1675,6 @@ MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
} else {
ASSERT(receiver_map->has_dictionary_elements() ||
receiver_map->has_fast_elements() ||
- receiver_map->has_fast_smi_only_elements() ||
receiver_map->has_fast_double_elements() ||
receiver_map->has_external_array_elements());
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
@@ -1693,18 +1685,17 @@ MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
- StubKind stub_kind,
+ bool is_store,
StrictModeFlag strict_mode,
Code* generic_stub) {
Code* result = NULL;
if (receiver->HasFastElements() ||
- receiver->HasFastSmiOnlyElements() ||
receiver->HasExternalArrayElements() ||
receiver->HasFastDoubleElements() ||
receiver->HasDictionaryElements()) {
MaybeObject* maybe_stub =
isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
- receiver, stub_kind, strict_mode);
+ receiver, is_store, strict_mode);
if (!maybe_stub->To(&result)) return maybe_stub;
} else {
result = generic_stub;
@@ -1713,21 +1704,6 @@ MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
}
-MaybeObject* KeyedIC::ComputeTransitionedMap(JSObject* receiver,
- StubKind stub_kind) {
- switch (stub_kind) {
- case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT:
- case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT:
- return receiver->GetElementsTransitionMap(FAST_ELEMENTS);
- case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
- return receiver->GetElementsTransitionMap(FAST_DOUBLE_ELEMENTS);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck(
bool is_js_array,
ElementsKind elements_kind) {
@@ -1735,88 +1711,14 @@ MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck(
}
-// If |map| is contained in |maps_list|, returns |map|; otherwise returns NULL.
-Map* GetMapIfPresent(Map* map, MapList* maps_list) {
- for (int i = 0; i < maps_list->length(); ++i) {
- if (maps_list->at(i) == map) return map;
- }
- return NULL;
-}
-
-
-// Returns the most generic transitioned map for |map| that's found in
-// |maps_list|, or NULL if no transitioned map for |map| is found at all.
-Map* GetTransitionedMap(Map* map, MapList* maps_list) {
- ElementsKind elements_kind = map->elements_kind();
- if (elements_kind == FAST_ELEMENTS) {
- return NULL;
- }
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- bool dummy = true;
- Map* fast_map = map->LookupElementsTransitionMap(FAST_ELEMENTS, &dummy);
- if (fast_map == NULL) return NULL;
- return GetMapIfPresent(fast_map, maps_list);
- }
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- bool dummy = true;
- Map* double_map = map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS,
- &dummy);
- // In the current implementation, if the DOUBLE map doesn't exist, the
- // FAST map can't exist either.
- if (double_map == NULL) return NULL;
- Map* fast_map = map->LookupElementsTransitionMap(FAST_ELEMENTS, &dummy);
- if (fast_map == NULL) {
- return GetMapIfPresent(double_map, maps_list);
- }
- // Both double_map and fast_map are non-NULL. Return fast_map if it's in
- // maps_list, double_map otherwise.
- Map* fast_map_present = GetMapIfPresent(fast_map, maps_list);
- if (fast_map_present != NULL) return fast_map_present;
- return GetMapIfPresent(double_map, maps_list);
- }
- return NULL;
-}
-
-
-MaybeObject* KeyedStoreIC::ComputePolymorphicStub(
+MaybeObject* KeyedStoreIC::ConstructMegamorphicStub(
MapList* receiver_maps,
+ CodeList* targets,
StrictModeFlag strict_mode) {
- // TODO(yangguo): <remove>
- Code* generic_stub = (strict_mode == kStrictMode)
- ? isolate()->builtins()->builtin(Builtins::kKeyedStoreIC_Generic_Strict)
- : isolate()->builtins()->builtin(Builtins::kKeyedStoreIC_Generic);
- // </remove>
-
- // Collect MONOMORPHIC stubs for all target_receiver_maps.
- CodeList handler_ics(receiver_maps->length());
- MapList transitioned_maps(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Map* receiver_map(receiver_maps->at(i));
- MaybeObject* maybe_cached_stub = NULL;
- Map* transitioned_map = GetTransitionedMap(receiver_map, receiver_maps);
- if (transitioned_map != NULL) {
- // TODO(yangguo): Enable this code!
- // maybe_cached_stub = FastElementsConversionStub(
- // receiver_map->elements_kind(), // original elements_kind
- // transitioned_map->elements_kind(),
- // receiver_map->instance_type() == JS_ARRAY_TYPE, // is_js_array
- // strict_mode_).TryGetCode();
- // TODO(yangguo): <remove>
- maybe_cached_stub = generic_stub;
- // </remove>
- } else {
- maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
- receiver_map, strict_mode);
- }
- Code* cached_stub;
- if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
- handler_ics.Add(cached_stub);
- transitioned_maps.Add(transitioned_map);
- }
Object* object;
KeyedStoreStubCompiler compiler(strict_mode);
- MaybeObject* maybe_code = compiler.CompileStorePolymorphic(
- receiver_maps, &handler_ics, &transitioned_maps);
+ MaybeObject* maybe_code = compiler.CompileStoreMegamorphic(receiver_maps,
+ targets);
if (!maybe_code->ToObject(&object)) return maybe_code;
isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
PROFILE(isolate(), CodeCreateEvent(
@@ -1884,21 +1786,9 @@ MaybeObject* KeyedStoreIC::Store(State state,
stub = non_strict_arguments_stub();
} else if (!force_generic) {
if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
- StubKind stub_kind = STORE_NO_TRANSITION;
- if (receiver->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
- if (value->IsHeapNumber()) {
- stub_kind = STORE_TRANSITION_SMI_TO_DOUBLE;
- } else if (value->IsHeapObject()) {
- stub_kind = STORE_TRANSITION_SMI_TO_OBJECT;
- }
- } else if (receiver->GetElementsKind() == FAST_DOUBLE_ELEMENTS) {
- if (!value->IsSmi() && !value->IsHeapNumber()) {
- stub_kind = STORE_TRANSITION_DOUBLE_TO_OBJECT;
- }
- }
HandleScope scope(isolate());
MaybeObject* maybe_stub = ComputeStub(receiver,
- stub_kind,
+ true,
strict_mode,
stub);
stub = maybe_stub->IsFailure() ?
@@ -2512,7 +2402,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
bool caught_exception;
- Handle<Object> builtin_args[] = { right };
+ Object** builtin_args[] = { right.location() };
Handle<Object> result = Execution::Call(builtin_function,
left,
ARRAY_SIZE(builtin_args),
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index ca8447eb81..ece5be9f05 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -342,13 +342,6 @@ class LoadIC: public IC {
class KeyedIC: public IC {
public:
- enum StubKind {
- LOAD,
- STORE_NO_TRANSITION,
- STORE_TRANSITION_SMI_TO_OBJECT,
- STORE_TRANSITION_SMI_TO_DOUBLE,
- STORE_TRANSITION_DOUBLE_TO_OBJECT
- };
explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
virtual ~KeyedIC() {}
@@ -364,30 +357,26 @@ class KeyedIC: public IC {
virtual Code::Kind kind() const = 0;
MaybeObject* ComputeStub(JSObject* receiver,
- StubKind stub_kind,
+ bool is_store,
StrictModeFlag strict_mode,
Code* default_stub);
- virtual MaybeObject* ComputePolymorphicStub(MapList* receiver_maps,
- StrictModeFlag strict_mode) = 0;
+ virtual MaybeObject* ConstructMegamorphicStub(
+ MapList* receiver_maps,
+ CodeList* targets,
+ StrictModeFlag strict_mode) = 0;
+
+ private:
+ void GetReceiverMapsForStub(Code* stub, MapList* result);
MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
Map* receiver_map,
StrictModeFlag strict_mode);
- private:
- void GetReceiverMapsForStub(Code* stub, MapList* result);
-
MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
- StubKind stub_kind,
+ bool is_store,
StrictModeFlag strict_mode,
Code* default_stub);
-
- MaybeObject* ComputeTransitionedMap(JSObject* receiver, StubKind stub_kind);
-
- static bool IsTransitionStubKind(StubKind stub_kind) {
- return stub_kind > STORE_NO_TRANSITION;
- }
};
@@ -430,8 +419,9 @@ class KeyedLoadIC: public KeyedIC {
protected:
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
- virtual MaybeObject* ComputePolymorphicStub(
+ virtual MaybeObject* ConstructMegamorphicStub(
MapList* receiver_maps,
+ CodeList* targets,
StrictModeFlag strict_mode);
virtual Code* string_stub() {
@@ -580,8 +570,9 @@ class KeyedStoreIC: public KeyedIC {
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
- virtual MaybeObject* ComputePolymorphicStub(
+ virtual MaybeObject* ConstructMegamorphicStub(
MapList* receiver_maps,
+ CodeList* targets,
StrictModeFlag strict_mode);
private:
diff --git a/deps/v8/src/incremental-marking-inl.h b/deps/v8/src/incremental-marking-inl.h
deleted file mode 100644
index 43fe0f5539..0000000000
--- a/deps/v8/src/incremental-marking-inl.h
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_INCREMENTAL_MARKING_INL_H_
-#define V8_INCREMENTAL_MARKING_INL_H_
-
-#include "incremental-marking.h"
-
-namespace v8 {
-namespace internal {
-
-
-bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
- Object** slot,
- Object* value) {
- if (IsMarking() && value->IsHeapObject()) {
- MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
- if (Marking::IsWhite(value_bit)) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- }
-
- // Object is either grey or white it will be scanned if survives.
- return false;
- }
- return true;
- }
- return false;
-}
-
-
-void IncrementalMarking::RecordWrite(HeapObject* obj,
- Object** slot,
- Object* value) {
- if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- // Object is not going to be rescanned we need to record the slot.
- heap_->mark_compact_collector()->RecordSlot(
- HeapObject::RawField(obj, 0), slot, value);
- }
- }
-}
-
-
-void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
- RelocInfo* rinfo,
- Object* value) {
- if (IsMarking() && value->IsHeapObject()) {
- MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
- if (Marking::IsWhite(value_bit)) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- }
-
- // Object is either grey or white it will be scanned if survives.
- return;
- }
-
- if (is_compacting_) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- // Object is not going to be rescanned we need to record the slot.
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
- Code::cast(value));
- }
- }
- }
-}
-
-
-void IncrementalMarking::RecordWrites(HeapObject* obj) {
- if (IsMarking()) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- }
- }
-}
-
-
-void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
- MarkBit mark_bit) {
- ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
- ASSERT(obj->Size() >= 2*kPointerSize);
- ASSERT(IsMarking());
- Marking::BlackToGrey(mark_bit);
- int obj_size = obj->Size();
- MemoryChunk::IncrementLiveBytes(obj->address(), -obj_size);
- int64_t old_bytes_rescanned = bytes_rescanned_;
- bytes_rescanned_ = old_bytes_rescanned + obj_size;
- if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
- if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
- // If we have queued twice the heap size for rescanning then we are
- // going around in circles, scanning the same objects again and again
- // as the program mutates the heap faster than we can incrementally
- // trace it. In this case we switch to non-incremental marking in
- // order to finish off this marking phase.
- if (FLAG_trace_gc) {
- PrintF("Hurrying incremental marking because of lack of progress\n");
- }
- allocation_marking_factor_ = kMaxAllocationMarkingFactor;
- }
- }
-
- marking_deque_.UnshiftGrey(obj);
-}
-
-
-void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
- WhiteToGrey(obj, mark_bit);
- marking_deque_.PushGrey(obj);
-}
-
-
-void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
- ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
- ASSERT(obj->Size() >= 2*kPointerSize);
- ASSERT(IsMarking());
- Marking::WhiteToGrey(mark_bit);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_INCREMENTAL_MARKING_INL_H_
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
deleted file mode 100644
index 88ebd783ea..0000000000
--- a/deps/v8/src/incremental-marking.cc
+++ /dev/null
@@ -1,818 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "incremental-marking.h"
-
-#include "code-stubs.h"
-#include "compilation-cache.h"
-#include "v8conversions.h"
-
-namespace v8 {
-namespace internal {
-
-
-IncrementalMarking::IncrementalMarking(Heap* heap)
- : heap_(heap),
- state_(STOPPED),
- marking_deque_memory_(NULL),
- steps_count_(0),
- steps_took_(0),
- longest_step_(0.0),
- old_generation_space_available_at_start_of_incremental_(0),
- old_generation_space_used_at_start_of_incremental_(0),
- steps_count_since_last_gc_(0),
- steps_took_since_last_gc_(0),
- should_hurry_(false),
- allocation_marking_factor_(0),
- allocated_(0) {
-}
-
-
-void IncrementalMarking::TearDown() {
- delete marking_deque_memory_;
-}
-
-
-void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
- Object* value,
- Isolate* isolate) {
- ASSERT(obj->IsHeapObject());
-
- // Fast cases should already be covered by RecordWriteStub.
- ASSERT(value->IsHeapObject());
- ASSERT(!value->IsHeapNumber());
- ASSERT(!value->IsString() ||
- value->IsConsString() ||
- value->IsSlicedString());
- ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
-
- IncrementalMarking* marking = isolate->heap()->incremental_marking();
- ASSERT(!marking->is_compacting_);
- marking->RecordWrite(obj, NULL, value);
-}
-
-
-void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate) {
- IncrementalMarking* marking = isolate->heap()->incremental_marking();
- ASSERT(marking->is_compacting_);
- marking->RecordWrite(obj, slot, *slot);
-}
-
-
-void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
- if (IsMarking()) {
- Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(pc);
- RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
- RecordWriteIntoCode(host, &rinfo, value);
- }
-}
-
-
-void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
- Object** slot,
- Code* value) {
- if (BaseRecordWrite(host, slot, value) && is_compacting_) {
- ASSERT(slot != NULL);
- heap_->mark_compact_collector()->
- RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
- }
-}
-
-
-
-class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
- public:
- IncrementalMarkingMarkingVisitor(Heap* heap,
- IncrementalMarking* incremental_marking)
- : heap_(heap),
- incremental_marking_(incremental_marking) {
- }
-
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- Object* target = rinfo->target_object();
- if (target->NonFailureIsHeapObject()) {
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
- MarkObject(target);
- }
- }
-
- void VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
- MarkObject(target);
- }
-
- void VisitDebugTarget(RelocInfo* rinfo) {
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
- MarkObject(target);
- }
-
- void VisitCodeEntry(Address entry_address) {
- Object* target = Code::GetObjectFromEntryAddress(entry_address);
- heap_->mark_compact_collector()->
- RecordCodeEntrySlot(entry_address, Code::cast(target));
- MarkObject(target);
- }
-
- void VisitPointer(Object** p) {
- Object* obj = *p;
- if (obj->NonFailureIsHeapObject()) {
- heap_->mark_compact_collector()->RecordSlot(p, p, obj);
- MarkObject(obj);
- }
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- Object* obj = *p;
- if (obj->NonFailureIsHeapObject()) {
- heap_->mark_compact_collector()->RecordSlot(start, p, obj);
- MarkObject(obj);
- }
- }
- }
-
- private:
- // Mark object pointed to by p.
- INLINE(void MarkObject(Object* obj)) {
- HeapObject* heap_object = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
- if (mark_bit.data_only()) {
- if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
- MemoryChunk::IncrementLiveBytes(heap_object->address(),
- heap_object->Size());
- }
- } else if (Marking::IsWhite(mark_bit)) {
- incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
- }
- }
-
- Heap* heap_;
- IncrementalMarking* incremental_marking_;
-};
-
-
-class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
- public:
- IncrementalMarkingRootMarkingVisitor(Heap* heap,
- IncrementalMarking* incremental_marking)
- : heap_(heap),
- incremental_marking_(incremental_marking) {
- }
-
- void VisitPointer(Object** p) {
- MarkObjectByPointer(p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
- }
-
- private:
- void MarkObjectByPointer(Object** p) {
- Object* obj = *p;
- if (!obj->IsHeapObject()) return;
-
- HeapObject* heap_object = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
- if (mark_bit.data_only()) {
- if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
- MemoryChunk::IncrementLiveBytes(heap_object->address(),
- heap_object->Size());
- }
- } else {
- if (Marking::IsWhite(mark_bit)) {
- incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
- }
- }
- }
-
- Heap* heap_;
- IncrementalMarking* incremental_marking_;
-};
-
-
-void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
- bool is_marking,
- bool is_compacting) {
- if (is_marking) {
- chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
- // It's difficult to filter out slots recorded for large objects.
- if (chunk->owner()->identity() == LO_SPACE &&
- chunk->size() > static_cast<size_t>(Page::kPageSize) &&
- is_compacting) {
- chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
- }
- } else if (chunk->owner()->identity() == CELL_SPACE ||
- chunk->scan_on_scavenge()) {
- chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- } else {
- chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- }
-}
-
-
-void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
- bool is_marking) {
- chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- if (is_marking) {
- chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- } else {
- chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- }
- chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
- PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- SetOldSpacePageFlags(p, false, false);
- }
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
- NewSpace* space) {
- NewSpacePageIterator it(space);
- while (it.has_next()) {
- NewSpacePage* p = it.next();
- SetNewSpacePageFlags(p, false);
- }
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
- DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
- DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
- DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
- DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
- DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
- DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
-
- LargePage* lop = heap_->lo_space()->first_page();
- while (lop->is_valid()) {
- SetOldSpacePageFlags(lop, false, false);
- lop = lop->next_page();
- }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- SetOldSpacePageFlags(p, true, is_compacting_);
- }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
- NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
- while (it.has_next()) {
- NewSpacePage* p = it.next();
- SetNewSpacePageFlags(p, true);
- }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier() {
- ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
- ActivateIncrementalWriteBarrier(heap_->old_data_space());
- ActivateIncrementalWriteBarrier(heap_->cell_space());
- ActivateIncrementalWriteBarrier(heap_->map_space());
- ActivateIncrementalWriteBarrier(heap_->code_space());
- ActivateIncrementalWriteBarrier(heap_->new_space());
-
- LargePage* lop = heap_->lo_space()->first_page();
- while (lop->is_valid()) {
- SetOldSpacePageFlags(lop, true, is_compacting_);
- lop = lop->next_page();
- }
-}
-
-
-bool IncrementalMarking::WorthActivating() {
-#ifndef DEBUG
- static const intptr_t kActivationThreshold = 8 * MB;
-#else
- // TODO(gc) consider setting this to some low level so that some
- // debug tests run with incremental marking and some without.
- static const intptr_t kActivationThreshold = 0;
-#endif
-
- return FLAG_incremental_marking &&
- !Serializer::enabled() &&
- heap_->PromotedSpaceSize() > kActivationThreshold;
-}
-
-
-void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
- ASSERT(RecordWriteStub::GetMode(stub) ==
- RecordWriteStub::STORE_BUFFER_ONLY);
-
- if (!IsMarking()) {
- // Initially stub is generated in STORE_BUFFER_ONLY mode thus
- // we don't need to do anything if incremental marking is
- // not active.
- } else if (IsCompacting()) {
- RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
- } else {
- RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
- }
-}
-
-
-static void PatchIncrementalMarkingRecordWriteStubs(
- Heap* heap, RecordWriteStub::Mode mode) {
- NumberDictionary* stubs = heap->code_stubs();
-
- int capacity = stubs->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = stubs->KeyAt(i);
- if (stubs->IsKey(k)) {
- uint32_t key = NumberToUint32(k);
-
- if (CodeStub::MajorKeyFromKey(key) ==
- CodeStub::RecordWrite) {
- Object* e = stubs->ValueAt(i);
- if (e->IsCode()) {
- RecordWriteStub::Patch(Code::cast(e), mode);
- }
- }
- }
- }
-}
-
-
-void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
- if (marking_deque_memory_ == NULL) {
- marking_deque_memory_ = new VirtualMemory(4 * MB);
- marking_deque_memory_->Commit(
- reinterpret_cast<Address>(marking_deque_memory_->address()),
- marking_deque_memory_->size(),
- false); // Not executable.
- }
-}
-
-
-void IncrementalMarking::Start() {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start\n");
- }
- ASSERT(FLAG_incremental_marking);
- ASSERT(state_ == STOPPED);
-
- ResetStepCounters();
-
- if (heap_->old_pointer_space()->IsSweepingComplete() &&
- heap_->old_data_space()->IsSweepingComplete()) {
- StartMarking(ALLOW_COMPACTION);
- } else {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start sweeping.\n");
- }
- state_ = SWEEPING;
- }
-
- heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
-}
-
-
-static void MarkObjectGreyDoNotEnqueue(Object* obj) {
- if (obj->IsHeapObject()) {
- HeapObject* heap_obj = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
- if (Marking::IsBlack(mark_bit)) {
- MemoryChunk::IncrementLiveBytes(heap_obj->address(),
- -heap_obj->Size());
- }
- Marking::AnyToGrey(mark_bit);
- }
-}
-
-
-void IncrementalMarking::StartMarking(CompactionFlag flag) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Start marking\n");
- }
-
- is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
- heap_->mark_compact_collector()->StartCompaction();
-
- state_ = MARKING;
-
- RecordWriteStub::Mode mode = is_compacting_ ?
- RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
-
- PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
-
- EnsureMarkingDequeIsCommitted();
-
- // Initialize marking stack.
- Address addr = static_cast<Address>(marking_deque_memory_->address());
- size_t size = marking_deque_memory_->size();
- if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
- marking_deque_.Initialize(addr, addr + size);
-
- ActivateIncrementalWriteBarrier();
-
-#ifdef DEBUG
- // Marking bits are cleared by the sweeper.
- heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
-#endif
-
- heap_->CompletelyClearInstanceofCache();
- heap_->isolate()->compilation_cache()->MarkCompactPrologue();
-
- if (FLAG_cleanup_code_caches_at_gc) {
- // We will mark cache black with a separate pass
- // when we finish marking.
- MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
- }
-
- // Mark strong roots grey.
- IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
- heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
-
- // Ready to start incremental marking.
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Running\n");
- }
-}
-
-
-void IncrementalMarking::PrepareForScavenge() {
- if (!IsMarking()) return;
- NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
- heap_->new_space()->FromSpaceEnd());
- while (it.has_next()) {
- Bitmap::Clear(it.next());
- }
-}
-
-
-void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
- if (!IsMarking()) return;
-
- int current = marking_deque_.bottom();
- int mask = marking_deque_.mask();
- int limit = marking_deque_.top();
- HeapObject** array = marking_deque_.array();
- int new_top = current;
-
- Map* filler_map = heap_->one_pointer_filler_map();
-
- while (current != limit) {
- HeapObject* obj = array[current];
- ASSERT(obj->IsHeapObject());
- current = ((current + 1) & mask);
- if (heap_->InNewSpace(obj)) {
- MapWord map_word = obj->map_word();
- if (map_word.IsForwardingAddress()) {
- HeapObject* dest = map_word.ToForwardingAddress();
- array[new_top] = dest;
- new_top = ((new_top + 1) & mask);
- ASSERT(new_top != marking_deque_.bottom());
-#ifdef DEBUG
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- ASSERT(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)));
-#endif
- }
- } else if (obj->map() != filler_map) {
- // Skip one word filler objects that appear on the
- // stack when we perform in place array shift.
- array[new_top] = obj;
- new_top = ((new_top + 1) & mask);
- ASSERT(new_top != marking_deque_.bottom());
-#ifdef DEBUG
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- ASSERT(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)));
-#endif
- }
- }
- marking_deque_.set_top(new_top);
-
- steps_took_since_last_gc_ = 0;
- steps_count_since_last_gc_ = 0;
- longest_step_ = 0.0;
-}
-
-
-void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
- v->VisitPointers(
- HeapObject::RawField(
- ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
- HeapObject::RawField(
- ctx, Context::MarkCompactBodyDescriptor::kEndOffset));
-
- MarkCompactCollector* collector = heap_->mark_compact_collector();
- for (int idx = Context::FIRST_WEAK_SLOT;
- idx < Context::GLOBAL_CONTEXT_SLOTS;
- ++idx) {
- Object** slot =
- HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
- collector->RecordSlot(slot, slot, *slot);
- }
-}
-
-
-void IncrementalMarking::Hurry() {
- if (state() == MARKING) {
- double start = 0.0;
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Hurry\n");
- start = OS::TimeCurrentMillis();
- }
- // TODO(gc) hurry can mark objects it encounters black as mutator
- // was stopped.
- Map* filler_map = heap_->one_pointer_filler_map();
- Map* global_context_map = heap_->global_context_map();
- IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
- while (!marking_deque_.IsEmpty()) {
- HeapObject* obj = marking_deque_.Pop();
-
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = obj->map();
- if (map == filler_map) {
- continue;
- } else if (map == global_context_map) {
- // Global contexts have weak fields.
- VisitGlobalContext(Context::cast(obj), &marking_visitor);
- } else {
- obj->Iterate(&marking_visitor);
- }
-
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- ASSERT(!Marking::IsBlack(mark_bit));
- Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
- }
- state_ = COMPLETE;
- if (FLAG_trace_incremental_marking) {
- double end = OS::TimeCurrentMillis();
- PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
- static_cast<int>(end - start));
- }
- }
-
- if (FLAG_cleanup_code_caches_at_gc) {
- PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
- Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
- MemoryChunk::IncrementLiveBytes(poly_cache->address(),
- PolymorphicCodeCache::kSize);
- }
-
- Object* context = heap_->global_contexts_list();
- while (!context->IsUndefined()) {
- NormalizedMapCache* cache = Context::cast(context)->normalized_map_cache();
- MarkBit mark_bit = Marking::MarkBitFrom(cache);
- if (Marking::IsGrey(mark_bit)) {
- Marking::GreyToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytes(cache->address(), cache->Size());
- }
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
- }
-}
-
-
-void IncrementalMarking::Abort() {
- if (IsStopped()) return;
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Aborting.\n");
- }
- heap_->new_space()->LowerInlineAllocationLimit(0);
- IncrementalMarking::set_should_hurry(false);
- ResetStepCounters();
- if (IsMarking()) {
- PatchIncrementalMarkingRecordWriteStubs(heap_,
- RecordWriteStub::STORE_BUFFER_ONLY);
- DeactivateIncrementalWriteBarrier();
-
- if (is_compacting_) {
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- Page* p = Page::FromAddress(obj->address());
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- p->ClearFlag(Page::RESCAN_ON_EVACUATION);
- }
- }
- }
- }
- heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
- state_ = STOPPED;
- is_compacting_ = false;
-}
-
-
-void IncrementalMarking::Finalize() {
- Hurry();
- state_ = STOPPED;
- is_compacting_ = false;
- heap_->new_space()->LowerInlineAllocationLimit(0);
- IncrementalMarking::set_should_hurry(false);
- ResetStepCounters();
- PatchIncrementalMarkingRecordWriteStubs(heap_,
- RecordWriteStub::STORE_BUFFER_ONLY);
- DeactivateIncrementalWriteBarrier();
- ASSERT(marking_deque_.IsEmpty());
- heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
-}
-
-
-void IncrementalMarking::MarkingComplete() {
- state_ = COMPLETE;
- // We will set the stack guard to request a GC now. This will mean the rest
- // of the GC gets performed as soon as possible (we can't do a GC here in a
- // record-write context). If a few things get allocated between now and then,
- // that shouldn't make us do a scavenge and keep being incremental; so we set
- // the should-hurry flag to indicate that there can't be much work left to do.
- set_should_hurry(true);
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Complete (normal).\n");
- }
- heap_->isolate()->stack_guard()->RequestGC();
-}
-
-
-void IncrementalMarking::Step(intptr_t allocated_bytes) {
- if (heap_->gc_state() != Heap::NOT_IN_GC ||
- !FLAG_incremental_marking ||
- !FLAG_incremental_marking_steps ||
- (state_ != SWEEPING && state_ != MARKING)) {
- return;
- }
-
- allocated_ += allocated_bytes;
-
- if (allocated_ < kAllocatedThreshold) return;
-
- intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
-
- double start = 0;
-
- if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
- start = OS::TimeCurrentMillis();
- }
-
- if (state_ == SWEEPING) {
- if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) &&
- heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) {
- StartMarking(PREVENT_COMPACTION);
- }
- } else if (state_ == MARKING) {
- Map* filler_map = heap_->one_pointer_filler_map();
- Map* global_context_map = heap_->global_context_map();
- IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
- while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
- HeapObject* obj = marking_deque_.Pop();
-
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = obj->map();
- if (map == filler_map) continue;
-
- int size = obj->SizeFromMap(map);
- bytes_to_process -= size;
- MarkBit map_mark_bit = Marking::MarkBitFrom(map);
- if (Marking::IsWhite(map_mark_bit)) {
- WhiteToGreyAndPush(map, map_mark_bit);
- }
-
- // TODO(gc) switch to static visitor instead of normal visitor.
- if (map == global_context_map) {
- // Global contexts have weak fields.
- Context* ctx = Context::cast(obj);
-
- // We will mark cache black with a separate pass
- // when we finish marking.
- MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
-
- VisitGlobalContext(ctx, &marking_visitor);
- } else {
- obj->IterateBody(map->instance_type(), size, &marking_visitor);
- }
-
- MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
- ASSERT(Marking::IsGrey(obj_mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
- Marking::MarkBlack(obj_mark_bit);
- MemoryChunk::IncrementLiveBytes(obj->address(), size);
- }
- if (marking_deque_.IsEmpty()) MarkingComplete();
- }
-
- allocated_ = 0;
-
- steps_count_++;
- steps_count_since_last_gc_++;
-
- bool speed_up = false;
-
- if (old_generation_space_available_at_start_of_incremental_ < 10 * MB ||
- SpaceLeftInOldSpace() <
- old_generation_space_available_at_start_of_incremental_ >> 1) {
- // Half of the space that was available is gone while we were
- // incrementally marking.
- speed_up = true;
- old_generation_space_available_at_start_of_incremental_ =
- SpaceLeftInOldSpace();
- }
-
- if (heap_->PromotedTotalSize() >
- old_generation_space_used_at_start_of_incremental_ << 1) {
- // Size of old space doubled while we were incrementally marking.
- speed_up = true;
- old_generation_space_used_at_start_of_incremental_ =
- heap_->PromotedTotalSize();
- }
-
- if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0 &&
- allocation_marking_factor_ < kMaxAllocationMarkingFactor) {
- speed_up = true;
- }
-
- if (speed_up && 0) {
- allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
- allocation_marking_factor_ =
- static_cast<int>(allocation_marking_factor_ * 1.3);
- if (FLAG_trace_gc) {
- PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
- }
- }
-
- if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
- double end = OS::TimeCurrentMillis();
- double delta = (end - start);
- longest_step_ = Max(longest_step_, delta);
- steps_took_ += delta;
- steps_took_since_last_gc_ += delta;
- }
-}
-
-
-void IncrementalMarking::ResetStepCounters() {
- steps_count_ = 0;
- steps_took_ = 0;
- longest_step_ = 0.0;
- old_generation_space_available_at_start_of_incremental_ =
- SpaceLeftInOldSpace();
- old_generation_space_used_at_start_of_incremental_ =
- heap_->PromotedTotalSize();
- steps_count_since_last_gc_ = 0;
- steps_took_since_last_gc_ = 0;
- bytes_rescanned_ = 0;
- allocation_marking_factor_ = kInitialAllocationMarkingFactor;
-}
-
-
-int64_t IncrementalMarking::SpaceLeftInOldSpace() {
- return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
-}
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
deleted file mode 100644
index d1627bcba5..0000000000
--- a/deps/v8/src/incremental-marking.h
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_INCREMENTAL_MARKING_H_
-#define V8_INCREMENTAL_MARKING_H_
-
-
-#include "execution.h"
-#include "mark-compact.h"
-#include "objects.h"
-
-namespace v8 {
-namespace internal {
-
-
-class IncrementalMarking {
- public:
- enum State {
- STOPPED,
- SWEEPING,
- MARKING,
- COMPLETE
- };
-
- explicit IncrementalMarking(Heap* heap);
-
- void TearDown();
-
- State state() {
- ASSERT(state_ == STOPPED || FLAG_incremental_marking);
- return state_;
- }
-
- bool should_hurry() { return should_hurry_; }
-
- inline bool IsStopped() { return state() == STOPPED; }
-
- inline bool IsMarking() { return state() >= MARKING; }
-
- inline bool IsMarkingIncomplete() { return state() == MARKING; }
-
- bool WorthActivating();
-
- void Start();
-
- void Stop();
-
- void PrepareForScavenge();
-
- void UpdateMarkingDequeAfterScavenge();
-
- void Hurry();
-
- void Finalize();
-
- void Abort();
-
- void MarkingComplete();
-
- // It's hard to know how much work the incremental marker should do to make
- // progress in the face of the mutator creating new work for it. We start
- // off at a moderate rate of work and gradually increase the speed of the
- // incremental marker until it completes.
- // Do some marking every time this much memory has been allocated.
- static const intptr_t kAllocatedThreshold = 65536;
- // Start off by marking this many times more memory than has been allocated.
- static const intptr_t kInitialAllocationMarkingFactor = 1;
- // But if we are promoting a lot of data we need to mark faster to keep up
- // with the data that is entering the old space through promotion.
- static const intptr_t kFastMarking = 3;
- // After this many steps we increase the marking/allocating factor.
- static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
- // This is how much we increase the marking/allocating factor by.
- static const intptr_t kAllocationMarkingFactorSpeedup = 2;
- static const intptr_t kMaxAllocationMarkingFactor = 1000000000;
-
- void OldSpaceStep(intptr_t allocated) {
- Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
- }
- void Step(intptr_t allocated);
-
- inline void RestartIfNotMarking() {
- if (state_ == COMPLETE) {
- state_ = MARKING;
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
- }
- }
- }
-
- static void RecordWriteFromCode(HeapObject* obj,
- Object* value,
- Isolate* isolate);
-
- static void RecordWriteForEvacuationFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate);
-
- inline bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value);
-
-
- inline void RecordWrite(HeapObject* obj, Object** slot, Object* value);
- inline void RecordWriteIntoCode(HeapObject* obj,
- RelocInfo* rinfo,
- Object* value);
- void RecordCodeTargetPatch(Address pc, HeapObject* value);
- void RecordWriteOfCodeEntry(JSFunction* host, Object** slot, Code* value);
-
- inline void RecordWrites(HeapObject* obj);
-
- inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
-
- inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
-
- inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
-
- // Marks a white object black, or keeps grey and black objects unchanged.
- // Returns true if converting white to black.
- inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
- ASSERT(!Marking::IsImpossible(mark_bit));
- if (mark_bit.Get()) {
- // Grey or black: Keep the color.
- return false;
- }
- mark_bit.Set();
- ASSERT(Marking::IsBlack(mark_bit));
- return true;
- }
-
- inline int steps_count() {
- return steps_count_;
- }
-
- inline double steps_took() {
- return steps_took_;
- }
-
- inline double longest_step() {
- return longest_step_;
- }
-
- inline int steps_count_since_last_gc() {
- return steps_count_since_last_gc_;
- }
-
- inline double steps_took_since_last_gc() {
- return steps_took_since_last_gc_;
- }
-
- inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
- SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
- }
-
- inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
- SetNewSpacePageFlags(chunk, IsMarking());
- }
-
- MarkingDeque* marking_deque() { return &marking_deque_; }
-
- bool IsCompacting() { return IsMarking() && is_compacting_; }
-
- void ActivateGeneratedStub(Code* stub);
-
- void NotifyOfHighPromotionRate() {
- if (IsMarking()) {
- if (allocation_marking_factor_ < kFastMarking) {
- if (FLAG_trace_gc) {
- PrintF("Increasing marking speed to %d due to high promotion rate\n",
- static_cast<int>(kFastMarking));
- }
- allocation_marking_factor_ = kFastMarking;
- }
- }
- }
-
- private:
- void set_should_hurry(bool val) {
- should_hurry_ = val;
- }
-
- int64_t SpaceLeftInOldSpace();
-
- void ResetStepCounters();
-
- enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
-
- void StartMarking(CompactionFlag flag);
-
- void ActivateIncrementalWriteBarrier(PagedSpace* space);
- static void ActivateIncrementalWriteBarrier(NewSpace* space);
- void ActivateIncrementalWriteBarrier();
-
- static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
- static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
- void DeactivateIncrementalWriteBarrier();
-
- static void SetOldSpacePageFlags(MemoryChunk* chunk,
- bool is_marking,
- bool is_compacting);
-
- static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
-
- void EnsureMarkingDequeIsCommitted();
-
- void VisitGlobalContext(Context* ctx, ObjectVisitor* v);
-
- Heap* heap_;
-
- State state_;
- bool is_compacting_;
-
- VirtualMemory* marking_deque_memory_;
- MarkingDeque marking_deque_;
-
- int steps_count_;
- double steps_took_;
- double longest_step_;
- int64_t old_generation_space_available_at_start_of_incremental_;
- int64_t old_generation_space_used_at_start_of_incremental_;
- int steps_count_since_last_gc_;
- double steps_took_since_last_gc_;
- int64_t bytes_rescanned_;
- bool should_hurry_;
- int allocation_marking_factor_;
- intptr_t allocated_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_INCREMENTAL_MARKING_H_
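The pacing comment and constants in the deleted incremental-marking.h above feed IncrementalMarking::Step(): once kAllocatedThreshold bytes have been allocated, the marker gets a budget of allocated * allocation_marking_factor_ bytes of objects to visit, and the factor is meant to grow over time (in the deleted Step() that growth sits in the disabled "speed_up && 0" branch; the headroom-halved and promoted-size-doubled triggers are omitted here). The following is a minimal standalone sketch of just that arithmetic, with hypothetical names outside V8, not the collector's actual implementation:

// pacing_sketch.cc -- simplified model of the incremental-marking pacing
// described in the comment above (hypothetical, outside V8).
#include <cstdint>
#include <cstdio>

namespace {

const intptr_t kAllocatedThreshold = 65536;                    // bytes
const intptr_t kInitialAllocationMarkingFactor = 1;
const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
const intptr_t kAllocationMarkingFactorSpeedup = 2;

struct Pacer {
  intptr_t allocated = 0;  // bytes allocated since the last marking step
  intptr_t factor = kInitialAllocationMarkingFactor;
  int steps = 0;

  // Returns the marking budget (bytes of objects to visit) for this
  // allocation notification, or 0 if the threshold has not been reached.
  intptr_t Step(intptr_t allocated_bytes) {
    allocated += allocated_bytes;
    if (allocated < kAllocatedThreshold) return 0;
    intptr_t bytes_to_process = allocated * factor;
    allocated = 0;
    ++steps;
    // Periodically mark faster so marking eventually outpaces allocation
    // (mirrors the factor bump in the disabled speed-up branch of Step()).
    if (steps % kAllocationMarkingFactorSpeedupInterval == 0) {
      factor += kAllocationMarkingFactorSpeedup;
      factor = static_cast<intptr_t>(factor * 1.3);
    }
    return bytes_to_process;
  }
};

}  // namespace

int main() {
  Pacer pacer;
  // 64 KB allocated with the initial factor: budget equals the allocated bytes.
  std::printf("budget = %ld\n", static_cast<long>(pacer.Step(65536)));
  return 0;
}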
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index d6e613176d..aa6b5372ca 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -36,21 +36,6 @@ namespace v8 {
namespace internal {
-SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
- if (isolate->context() != NULL) {
- context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- dummy_ = Handle<Context>(isolate->context());
-#endif
- }
- isolate->set_save_context(this);
-
- // If there is no JS frame under the current C frame, use the value 0.
- JavaScriptFrameIterator it(isolate);
- js_sp_ = it.done() ? 0 : it.frame()->sp();
-}
-
-
bool Isolate::DebuggerHasBreakPoints() {
#ifdef ENABLE_DEBUGGER_SUPPORT
return debug()->has_break_points();
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 492694e607..fd0f673e7e 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -98,14 +98,6 @@ void ThreadLocalTop::InitializeInternal() {
failed_access_check_callback_ = NULL;
save_context_ = NULL;
catcher_ = NULL;
-
- // These members are re-initialized later after deserialization
- // is complete.
- pending_exception_ = NULL;
- has_pending_message_ = false;
- pending_message_obj_ = NULL;
- pending_message_script_ = NULL;
- scheduled_exception_ = NULL;
}
@@ -1292,9 +1284,6 @@ char* Isolate::ArchiveThread(char* to) {
memcpy(to, reinterpret_cast<char*>(thread_local_top()),
sizeof(ThreadLocalTop));
InitializeThreadLocal();
- clear_pending_exception();
- clear_pending_message();
- clear_scheduled_exception();
return to + sizeof(ThreadLocalTop);
}
@@ -1414,12 +1403,11 @@ Isolate::Isolate()
in_use_list_(0),
free_list_(0),
preallocated_storage_preallocated_(false),
- inner_pointer_to_code_cache_(NULL),
+ pc_to_code_cache_(NULL),
write_input_buffer_(NULL),
global_handles_(NULL),
context_switcher_(NULL),
thread_manager_(NULL),
- fp_stubs_generated_(false),
string_tracker_(NULL),
regexp_stack_(NULL),
embedder_data_(NULL) {
@@ -1587,8 +1575,8 @@ Isolate::~Isolate() {
compilation_cache_ = NULL;
delete bootstrapper_;
bootstrapper_ = NULL;
- delete inner_pointer_to_code_cache_;
- inner_pointer_to_code_cache_ = NULL;
+ delete pc_to_code_cache_;
+ pc_to_code_cache_ = NULL;
delete write_input_buffer_;
write_input_buffer_ = NULL;
@@ -1622,6 +1610,9 @@ Isolate::~Isolate() {
void Isolate::InitializeThreadLocal() {
thread_local_top_.isolate_ = this;
thread_local_top_.Initialize();
+ clear_pending_exception();
+ clear_pending_message();
+ clear_scheduled_exception();
}
@@ -1709,7 +1700,7 @@ bool Isolate::Init(Deserializer* des) {
context_slot_cache_ = new ContextSlotCache();
descriptor_lookup_cache_ = new DescriptorLookupCache();
unicode_cache_ = new UnicodeCache();
- inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
+ pc_to_code_cache_ = new PcToCodeCache(this);
write_input_buffer_ = new StringInputBuffer();
global_handles_ = new GlobalHandles(this);
bootstrapper_ = new Bootstrapper();
@@ -1776,14 +1767,9 @@ bool Isolate::Init(Deserializer* des) {
// If we are deserializing, read the state into the now-empty heap.
if (des != NULL) {
des->Deserialize();
- stub_cache_->Initialize(true);
+ stub_cache_->Clear();
}
- // Finish initialization of ThreadLocal after deserialization is done.
- clear_pending_exception();
- clear_pending_message();
- clear_scheduled_exception();
-
// Deserializing may put strange things in the root array's copy of the
// stack guard.
heap_.SetStackLimits();
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 01ab04e60a..2582da644a 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -66,7 +66,7 @@ class HandleScopeImplementer;
class HeapProfiler;
class InlineRuntimeFunctionsTable;
class NoAllocationStringAllocator;
-class InnerPointerToCodeCache;
+class PcToCodeCache;
class PreallocatedMemoryThread;
class RegExpStack;
class SaveContext;
@@ -841,9 +841,7 @@ class Isolate {
return unicode_cache_;
}
- InnerPointerToCodeCache* inner_pointer_to_code_cache() {
- return inner_pointer_to_code_cache_;
- }
+ PcToCodeCache* pc_to_code_cache() { return pc_to_code_cache_; }
StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
@@ -881,12 +879,6 @@ class Isolate {
RuntimeState* runtime_state() { return &runtime_state_; }
- void set_fp_stubs_generated(bool value) {
- fp_stubs_generated_ = value;
- }
-
- bool fp_stubs_generated() { return fp_stubs_generated_; }
-
StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
return &compiler_safe_string_input_buffer_;
}
@@ -1138,13 +1130,12 @@ class Isolate {
PreallocatedStorage in_use_list_;
PreallocatedStorage free_list_;
bool preallocated_storage_preallocated_;
- InnerPointerToCodeCache* inner_pointer_to_code_cache_;
+ PcToCodeCache* pc_to_code_cache_;
StringInputBuffer* write_input_buffer_;
GlobalHandles* global_handles_;
ContextSwitcher* context_switcher_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
- bool fp_stubs_generated_;
StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
Builtins builtins_;
StringTracker* string_tracker_;
@@ -1219,7 +1210,19 @@ class Isolate {
// versions of GCC. See V8 issue 122 for details.
class SaveContext BASE_EMBEDDED {
public:
- inline explicit SaveContext(Isolate* isolate);
+ explicit SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
+ if (isolate->context() != NULL) {
+ context_ = Handle<Context>(isolate->context());
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+ dummy_ = Handle<Context>(isolate->context());
+#endif
+ }
+ isolate->set_save_context(this);
+
+ // If there is no JS frame under the current C frame, use the value 0.
+ JavaScriptFrameIterator it(isolate);
+ js_sp_ = it.done() ? 0 : it.frame()->sp();
+ }
~SaveContext() {
if (context_.is_null()) {
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index ca796a6990..68eab65fd5 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -165,7 +165,7 @@ class JsonParser BASE_EMBEDDED {
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
- isolate_ = source->map()->GetHeap()->isolate();
+ isolate_ = source->map()->isolate();
FlattenString(source);
source_ = source;
source_length_ = source_->length();
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index c1a9e067c9..3ebfbdfc98 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -68,9 +68,9 @@ Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor,
Handle<String> flags,
bool* has_pending_exception) {
// Call the construct code with 2 arguments.
- Handle<Object> argv[] = { pattern, flags };
- return Execution::New(constructor, ARRAY_SIZE(argv), argv,
- has_pending_exception);
+ Object** argv[2] = { Handle<Object>::cast(pattern).location(),
+ Handle<Object>::cast(flags).location() };
+ return Execution::New(constructor, 2, argv, has_pending_exception);
}
@@ -4723,6 +4723,7 @@ bool OutSet::Get(unsigned value) {
const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
+const DispatchTable::Entry DispatchTable::Config::kNoValue;
void DispatchTable::AddRange(CharacterRange full_range, int value) {
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index df110d1c2a..54297a49ab 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,7 +29,6 @@
#define V8_JSREGEXP_H_
#include "allocation.h"
-#include "assembler.h"
#include "zone-inl.h"
namespace v8 {
@@ -389,7 +388,7 @@ class DispatchTable : public ZoneObject {
typedef uc16 Key;
typedef Entry Value;
static const uc16 kNoKey;
- static const Entry NoValue() { return Value(); }
+ static const Entry kNoValue;
static inline int Compare(uc16 a, uc16 b) {
if (a == b)
return 0;
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 35281eb20f..466110678a 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -1043,13 +1043,11 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
// it into a location different from the operand of a live range
// covering a branch instruction.
// Thus we need to manually record a pointer.
- LInstruction* branch =
- InstructionAt(cur_block->last_instruction_index());
- if (branch->HasPointerMap()) {
- if (phi->representation().IsTagged()) {
+ if (phi->representation().IsTagged()) {
+ LInstruction* branch =
+ InstructionAt(cur_block->last_instruction_index());
+ if (branch->HasPointerMap()) {
branch->pointer_map()->RecordPointer(phi_operand);
- } else if (!phi->representation().IsDouble()) {
- branch->pointer_map()->RecordUntagged(phi_operand);
}
}
}
@@ -1144,13 +1142,10 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
// it into a location different from the operand of a live range
// covering a branch instruction.
// Thus we need to manually record a pointer.
- LInstruction* branch = InstructionAt(pred->last_instruction_index());
- if (branch->HasPointerMap()) {
- if (HasTaggedValue(range->id())) {
+ if (HasTaggedValue(range->id())) {
+ LInstruction* branch = InstructionAt(pred->last_instruction_index());
+ if (branch->HasPointerMap()) {
branch->pointer_map()->RecordPointer(cur_op);
- } else if (!cur_op->IsDoubleStackSlot() &&
- !cur_op->IsDoubleRegister()) {
- branch->pointer_map()->RemovePointer(cur_op);
}
}
}
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index 31b16982d1..5410f6f058 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -156,27 +156,6 @@ void LPointerMap::RecordPointer(LOperand* op) {
}
-void LPointerMap::RemovePointer(LOperand* op) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- for (int i = 0; i < pointer_operands_.length(); ++i) {
- if (pointer_operands_[i]->Equals(op)) {
- pointer_operands_.Remove(i);
- --i;
- }
- }
-}
-
-
-void LPointerMap::RecordUntagged(LOperand* op) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- untagged_operands_.Add(op);
-}
-
-
void LPointerMap::PrintTo(StringStream* stream) {
stream->Add("{");
for (int i = 0; i < pointer_operands_.length(); ++i) {
@@ -203,7 +182,6 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
return 3;
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 1e90804c30..20da21a63c 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -407,18 +407,9 @@ class LParallelMove : public ZoneObject {
class LPointerMap: public ZoneObject {
public:
explicit LPointerMap(int position)
- : pointer_operands_(8),
- untagged_operands_(0),
- position_(position),
- lithium_position_(-1) { }
-
- const ZoneList<LOperand*>* GetNormalizedOperands() {
- for (int i = 0; i < untagged_operands_.length(); ++i) {
- RemovePointer(untagged_operands_[i]);
- }
- untagged_operands_.Clear();
- return &pointer_operands_;
- }
+ : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+
+ const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
int position() const { return position_; }
int lithium_position() const { return lithium_position_; }
@@ -428,13 +419,10 @@ class LPointerMap: public ZoneObject {
}
void RecordPointer(LOperand* op);
- void RemovePointer(LOperand* op);
- void RecordUntagged(LOperand* op);
void PrintTo(StringStream* stream);
private:
ZoneList<LOperand*> pointer_operands_;
- ZoneList<LOperand*> untagged_operands_;
int position_;
int lithium_position_;
};
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 6107cbf0b4..d44c2fc1cd 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -1000,7 +1000,6 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
static void ReplaceCodeObject(Code* original, Code* substitution) {
ASSERT(!HEAP->InNewSpace(substitution));
- HeapIterator iterator;
AssertNoAllocation no_allocations_please;
// A zone scope for ReferenceCollectorVisitor.
@@ -1017,6 +1016,7 @@ static void ReplaceCodeObject(Code* original, Code* substitution) {
// Now iterate over all pointers of all objects, including code_target
// implicit pointers.
+ HeapIterator iterator;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
obj->Iterate(&visitor);
}
@@ -1101,8 +1101,6 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
- HEAP->EnsureHeapIsIterable();
-
if (IsJSFunctionCode(shared_info->code())) {
Handle<Code> code = compile_info_wrapper.GetFunctionCode();
ReplaceCodeObject(shared_info->code(), *code);
@@ -1273,8 +1271,7 @@ class RelocInfoBuffer {
// Patch positions in code (changes relocation info section) and possibly
// returns new instance of code.
-static Handle<Code> PatchPositionsInCode(
- Handle<Code> code,
+static Handle<Code> PatchPositionsInCode(Handle<Code> code,
Handle<JSArray> position_change_array) {
RelocInfoBuffer buffer_writer(code->relocation_size(),
@@ -1289,7 +1286,7 @@ static Handle<Code> PatchPositionsInCode(
int new_position = TranslatePosition(position,
position_change_array);
if (position != new_position) {
- RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position, NULL);
+ RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position);
buffer_writer.Write(&info_copy);
continue;
}
@@ -1336,8 +1333,6 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
info->set_end_position(new_function_end);
info->set_function_token_position(new_function_token_pos);
- HEAP->EnsureHeapIsIterable();
-
if (IsJSFunctionCode(info->code())) {
// Patch relocation info section of the code.
Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
diff --git a/deps/v8/src/liveobjectlist.cc b/deps/v8/src/liveobjectlist.cc
index d62c4d1763..957c0515d6 100644
--- a/deps/v8/src/liveobjectlist.cc
+++ b/deps/v8/src/liveobjectlist.cc
@@ -1336,9 +1336,7 @@ MaybeObject* LiveObjectList::DumpPrivate(DumpWriter* writer,
// Allocate the JSArray of the elements.
Handle<JSObject> elements = factory->NewJSObject(isolate->array_function());
if (elements->IsFailure()) return Object::cast(*elements);
-
- maybe_result = Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
- if (maybe_result->IsFailure()) return maybe_result;
+ Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
// Set body.elements.
Handle<String> elements_sym = factory->LookupAsciiSymbol("elements");
@@ -1464,9 +1462,7 @@ MaybeObject* LiveObjectList::SummarizePrivate(SummaryWriter* writer,
Handle<JSObject> summary_obj =
factory->NewJSObject(isolate->array_function());
if (summary_obj->IsFailure()) return Object::cast(*summary_obj);
-
- maybe_result = Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
- if (maybe_result->IsFailure()) return maybe_result;
+ Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
// Create the body object.
Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
@@ -1593,9 +1589,7 @@ MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) {
// Return the result as a JS array.
Handle<JSObject> lols = factory->NewJSObject(isolate->array_function());
-
- maybe_result = Handle<JSArray>::cast(lols)->SetContent(*list);
- if (maybe_result->IsFailure()) return maybe_result;
+ Handle<JSArray>::cast(lols)->SetContent(*list);
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
if (result->IsFailure()) return Object::cast(*result);
@@ -2619,7 +2613,7 @@ void LiveObjectList::VerifyNotInFromSpace() {
HeapObject* heap_obj = it.Obj();
if (heap->InFromSpace(heap_obj)) {
OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n",
- i++, heap_obj, Heap::new_space()->FromSpaceStart());
+ i++, heap_obj, heap->new_space()->FromSpaceLow());
}
}
}
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index bad5fdc930..3d66b5fb10 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1356,12 +1356,12 @@ class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
Handle<Code>* code_objects) {
- HeapIterator iterator;
AssertNoAllocation no_alloc;
int compiled_funcs_count = 0;
// Iterate the heap to find shared function info objects and record
// the unoptimized code for them.
+ HeapIterator iterator;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsSharedFunctionInfo()) continue;
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
@@ -1519,9 +1519,8 @@ void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {
void Logger::LogCodeObjects() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- HeapIterator iterator;
AssertNoAllocation no_alloc;
+ HeapIterator iterator;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsCode()) LogCodeObject(obj);
}
@@ -1574,7 +1573,6 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
void Logger::LogCompiledFunctions() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
HandleScope scope;
const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
@@ -1593,9 +1591,9 @@ void Logger::LogCompiledFunctions() {
void Logger::LogAccessorCallbacks() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- HeapIterator iterator;
AssertNoAllocation no_alloc;
+ HeapIterator iterator;
+ i::Isolate* isolate = ISOLATE;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsAccessorInfo()) continue;
AccessorInfo* ai = AccessorInfo::cast(obj);
@@ -1603,11 +1601,11 @@ void Logger::LogAccessorCallbacks() {
String* name = String::cast(ai->name());
Address getter_entry = v8::ToCData<Address>(ai->getter());
if (getter_entry != 0) {
- PROFILE(ISOLATE, GetterCallbackEvent(name, getter_entry));
+ PROFILE(isolate, GetterCallbackEvent(name, getter_entry));
}
Address setter_entry = v8::ToCData<Address>(ai->setter());
if (setter_entry != 0) {
- PROFILE(ISOLATE, SetterCallbackEvent(name, setter_entry));
+ PROFILE(isolate, SetterCallbackEvent(name, setter_entry));
}
}
}
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 4d76fc820d..fe19810a2c 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -29,7 +29,6 @@
#define V8_LOG_H_
#include "allocation.h"
-#include "objects.h"
#include "platform.h"
#include "log-utils.h"
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 364fdb6274..30838bd761 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -93,63 +93,6 @@ const int kInvalidProtoDepth = -1;
namespace v8 {
namespace internal {
-class FrameScope {
- public:
- explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
- : masm_(masm), type_(type), old_has_frame_(masm->has_frame()) {
- masm->set_has_frame(true);
- if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) {
- masm->EnterFrame(type);
- }
- }
-
- ~FrameScope() {
- if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
- masm_->LeaveFrame(type_);
- }
- masm_->set_has_frame(old_has_frame_);
- }
-
- // Normally we generate the leave-frame code when this object goes
- // out of scope. Sometimes we may need to generate the code somewhere else
- // in addition. Calling this will achieve that, but the object stays in
- // scope, the MacroAssembler is still marked as being in a frame scope, and
- // the code will be generated again when it goes out of scope.
- void GenerateLeaveFrame() {
- masm_->LeaveFrame(type_);
- }
-
- private:
- MacroAssembler* masm_;
- StackFrame::Type type_;
- bool old_has_frame_;
-};
-
-
-class AllowExternalCallThatCantCauseGC: public FrameScope {
- public:
- explicit AllowExternalCallThatCantCauseGC(MacroAssembler* masm)
- : FrameScope(masm, StackFrame::NONE) { }
-};
-
-
-class NoCurrentFrameScope {
- public:
- explicit NoCurrentFrameScope(MacroAssembler* masm)
- : masm_(masm), saved_(masm->has_frame()) {
- masm->set_has_frame(false);
- }
-
- ~NoCurrentFrameScope() {
- masm_->set_has_frame(saved_);
- }
-
- private:
- MacroAssembler* masm_;
- bool saved_;
-};
-
-
// Support for "structured" code comments.
#ifdef DEBUG
diff --git a/deps/v8/src/mark-compact-inl.h b/deps/v8/src/mark-compact-inl.h
deleted file mode 100644
index 20f11a78a2..0000000000
--- a/deps/v8/src/mark-compact-inl.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MARK_COMPACT_INL_H_
-#define V8_MARK_COMPACT_INL_H_
-
-#include "isolate.h"
-#include "memory.h"
-#include "mark-compact.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-MarkBit Marking::MarkBitFrom(Address addr) {
- MemoryChunk *p = MemoryChunk::FromAddress(addr);
- return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
- p->ContainsOnlyData());
-}
-
-
-void MarkCompactCollector::SetFlags(int flags) {
- sweep_precisely_ = ((flags & Heap::kMakeHeapIterableMask) != 0);
-}
-
-
-void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
- ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
- if (!mark_bit.Get()) {
- mark_bit.Set();
- MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
-#ifdef DEBUG
- UpdateLiveObjectCount(obj);
-#endif
- ProcessNewlyMarkedObject(obj);
- }
-}
-
-
-void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
- ASSERT(!mark_bit.Get());
- ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
- mark_bit.Set();
- MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
-#ifdef DEBUG
- UpdateLiveObjectCount(obj);
-#endif
-}
-
-
-bool MarkCompactCollector::IsMarked(Object* obj) {
- ASSERT(obj->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(obj);
- return Marking::MarkBitFrom(heap_object).Get();
-}
-
-
-void MarkCompactCollector::RecordSlot(Object** anchor_slot,
- Object** slot,
- Object* object) {
- Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
- if (object_page->IsEvacuationCandidate() &&
- !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- object_page->slots_buffer_address(),
- slot,
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
- EvictEvacuationCandidate(object_page);
- }
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_MARK_COMPACT_INL_H_
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 9fa79ca746..3e4a617b78 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -27,31 +27,20 @@
#include "v8.h"
-#include "code-stubs.h"
#include "compilation-cache.h"
-#include "deoptimizer.h"
#include "execution.h"
+#include "heap-profiler.h"
#include "gdb-jit.h"
#include "global-handles.h"
-#include "heap-profiler.h"
#include "ic-inl.h"
-#include "incremental-marking.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
-
-const char* Marking::kWhiteBitPattern = "00";
-const char* Marking::kBlackBitPattern = "10";
-const char* Marking::kGreyBitPattern = "11";
-const char* Marking::kImpossibleBitPattern = "01";
-
-
// -------------------------------------------------------------------------
// MarkCompactCollector
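The Marking bit patterns removed just above encode each object's color in two adjacent mark bits: "00" white, "10" black, "11" grey, "01" impossible (reading the strings as <mark bit><next bit>, consistent with MarkBlackOrKeepGrey in the deleted incremental-marking.h). A toy model of that encoding and of the white -> grey -> black transitions driven by the marking deque, using hypothetical helper names:

// markbit_colors.cc -- toy model of the two-bit object colors used by the
// incremental marker (hypothetical helpers; simplified from the patterns
// "00"/"10"/"11" above).
#include <cassert>
#include <cstdio>

struct MarkBits {
  bool first = false;   // "marked" bit
  bool second = false;  // "grey" bit
};

bool IsWhite(const MarkBits& m) { return !m.first && !m.second; }
bool IsBlack(const MarkBits& m) { return m.first && !m.second; }
bool IsGrey(const MarkBits& m)  { return m.first && m.second; }

// White objects become grey when they are pushed on the marking deque.
void WhiteToGrey(MarkBits* m) {
  assert(IsWhite(*m));
  m->first = true;
  m->second = true;
}

// Grey objects become black once their fields have been visited.
void GreyToBlack(MarkBits* m) {
  assert(IsGrey(*m));
  m->second = false;
}

int main() {
  MarkBits bits;         // freshly allocated: white
  WhiteToGrey(&bits);    // discovered: grey, sitting on the deque
  GreyToBlack(&bits);    // scanned: black
  std::printf("black? %d\n", IsBlack(bits));
  return 0;
}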
@@ -59,12 +48,11 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
#ifdef DEBUG
state_(IDLE),
#endif
- sweep_precisely_(false),
- compacting_(false),
- was_marked_incrementally_(false),
- collect_maps_(FLAG_collect_maps),
+ force_compaction_(false),
+ compacting_collection_(false),
+ compact_on_next_gc_(false),
+ previous_marked_count_(0),
tracer_(NULL),
- migration_slots_buffer_(NULL),
#ifdef DEBUG
live_young_objects_size_(0),
live_old_pointer_objects_size_(0),
@@ -80,408 +68,50 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
encountered_weak_maps_(NULL) { }
-#ifdef DEBUG
-class VerifyMarkingVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
- }
- }
- }
-};
-
-
-static void VerifyMarking(Address bottom, Address top) {
- VerifyMarkingVisitor visitor;
- HeapObject* object;
- Address next_object_must_be_here_or_later = bottom;
-
- for (Address current = bottom;
- current < top;
- current += kPointerSize) {
- object = HeapObject::FromAddress(current);
- if (MarkCompactCollector::IsMarked(object)) {
- ASSERT(current >= next_object_must_be_here_or_later);
- object->Iterate(&visitor);
- next_object_must_be_here_or_later = current + object->Size();
- }
- }
-}
-
-
-static void VerifyMarking(NewSpace* space) {
- Address end = space->top();
- NewSpacePageIterator it(space->bottom(), end);
- // The bottom position is at the start of its page. Allows us to use
- // page->body() as start of range on all pages.
- ASSERT_EQ(space->bottom(),
- NewSpacePage::FromAddress(space->bottom())->body());
- while (it.has_next()) {
- NewSpacePage* page = it.next();
- Address limit = it.has_next() ? page->body_limit() : end;
- ASSERT(limit == end || !page->Contains(end));
- VerifyMarking(page->body(), limit);
- }
-}
-
-
-static void VerifyMarking(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
- VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd());
- }
-}
-
-
-static void VerifyMarking(Heap* heap) {
- VerifyMarking(heap->old_pointer_space());
- VerifyMarking(heap->old_data_space());
- VerifyMarking(heap->code_space());
- VerifyMarking(heap->cell_space());
- VerifyMarking(heap->map_space());
- VerifyMarking(heap->new_space());
-
- VerifyMarkingVisitor visitor;
-
- LargeObjectIterator it(heap->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- if (MarkCompactCollector::IsMarked(obj)) {
- obj->Iterate(&visitor);
- }
- }
-
- heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
-}
-
-
-class VerifyEvacuationVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
- }
- }
- }
-};
-
-
-static void VerifyEvacuation(Address bottom, Address top) {
- VerifyEvacuationVisitor visitor;
- HeapObject* object;
- Address next_object_must_be_here_or_later = bottom;
-
- for (Address current = bottom;
- current < top;
- current += kPointerSize) {
- object = HeapObject::FromAddress(current);
- if (MarkCompactCollector::IsMarked(object)) {
- ASSERT(current >= next_object_must_be_here_or_later);
- object->Iterate(&visitor);
- next_object_must_be_here_or_later = current + object->Size();
- }
- }
-}
-
-
-static void VerifyEvacuation(NewSpace* space) {
- NewSpacePageIterator it(space->bottom(), space->top());
- VerifyEvacuationVisitor visitor;
-
- while (it.has_next()) {
- NewSpacePage* page = it.next();
- Address current = page->body();
- Address limit = it.has_next() ? page->body_limit() : space->top();
- ASSERT(limit == space->top() || !page->Contains(space->top()));
- while (current < limit) {
- HeapObject* object = HeapObject::FromAddress(current);
- object->Iterate(&visitor);
- current += object->Size();
- }
- }
-}
-
-
-static void VerifyEvacuation(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
- if (p->IsEvacuationCandidate()) continue;
- VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd());
- }
-}
-
-
-static void VerifyEvacuation(Heap* heap) {
- VerifyEvacuation(heap->old_pointer_space());
- VerifyEvacuation(heap->old_data_space());
- VerifyEvacuation(heap->code_space());
- VerifyEvacuation(heap->cell_space());
- VerifyEvacuation(heap->map_space());
- VerifyEvacuation(heap->new_space());
-
- VerifyEvacuationVisitor visitor;
- heap->IterateStrongRoots(&visitor, VISIT_ALL);
-}
-#endif
-
-
-void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
- p->MarkEvacuationCandidate();
- evacuation_candidates_.Add(p);
-}
-
-
-bool MarkCompactCollector::StartCompaction() {
- if (!compacting_) {
- ASSERT(evacuation_candidates_.length() == 0);
-
- CollectEvacuationCandidates(heap()->old_pointer_space());
- CollectEvacuationCandidates(heap()->old_data_space());
-
- if (FLAG_compact_code_space) {
- CollectEvacuationCandidates(heap()->code_space());
- }
-
- heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
- heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
- heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
-
- compacting_ = evacuation_candidates_.length() > 0;
- }
-
- return compacting_;
-}
-
-
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
ASSERT(state_ == PREPARE_GC);
ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
+ // Prepare has selected whether to compact the old generation or not.
+ // Tell the tracer.
+ if (IsCompacting()) tracer_->set_is_compacting();
+
MarkLiveObjects();
- ASSERT(heap_->incremental_marking()->IsStopped());
- if (collect_maps_) ClearNonLiveTransitions();
+ if (FLAG_collect_maps) ClearNonLiveTransitions();
ClearWeakMaps();
-#ifdef DEBUG
- if (FLAG_verify_heap) {
- VerifyMarking(heap_);
- }
-#endif
+ SweepLargeObjectSpace();
- SweepSpaces();
+ if (IsCompacting()) {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
+ EncodeForwardingAddresses();
- if (!collect_maps_) ReattachInitialMaps();
+ heap()->MarkMapPointersAsEncoded(true);
+ UpdatePointers();
+ heap()->MarkMapPointersAsEncoded(false);
+ heap()->isolate()->pc_to_code_cache()->Flush();
- heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+ RelocateObjects();
+ } else {
+ SweepSpaces();
+ heap()->isolate()->pc_to_code_cache()->Flush();
+ }
Finish();
+ // Save the count of marked objects remaining after the collection and
+ // null out the GC tracer.
+ previous_marked_count_ = tracer_->marked_count();
+ ASSERT(previous_marked_count_ == 0);
tracer_ = NULL;
}
-#ifdef DEBUG
-void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
- CHECK(p->markbits()->IsClean());
- CHECK_EQ(0, p->LiveBytes());
- }
-}
-
-void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
- NewSpacePageIterator it(space->bottom(), space->top());
-
- while (it.has_next()) {
- NewSpacePage* p = it.next();
- CHECK(p->markbits()->IsClean());
- CHECK_EQ(0, p->LiveBytes());
- }
-}
-
-void MarkCompactCollector::VerifyMarkbitsAreClean() {
- VerifyMarkbitsAreClean(heap_->old_pointer_space());
- VerifyMarkbitsAreClean(heap_->old_data_space());
- VerifyMarkbitsAreClean(heap_->code_space());
- VerifyMarkbitsAreClean(heap_->cell_space());
- VerifyMarkbitsAreClean(heap_->map_space());
- VerifyMarkbitsAreClean(heap_->new_space());
-
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- ASSERT(Marking::IsWhite(mark_bit));
- }
-}
-#endif
-
-
-static void ClearMarkbits(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Bitmap::Clear(it.next());
- }
-}
-
-
-static void ClearMarkbits(NewSpace* space) {
- NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-
- while (it.has_next()) {
- Bitmap::Clear(it.next());
- }
-}
-
-
-static void ClearMarkbits(Heap* heap) {
- ClearMarkbits(heap->code_space());
- ClearMarkbits(heap->map_space());
- ClearMarkbits(heap->old_pointer_space());
- ClearMarkbits(heap->old_data_space());
- ClearMarkbits(heap->cell_space());
- ClearMarkbits(heap->new_space());
-
- LargeObjectIterator it(heap->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- mark_bit.Clear();
- mark_bit.Next().Clear();
- }
-}
-
-
-bool Marking::TransferMark(Address old_start, Address new_start) {
- // This is only used when resizing an object.
- ASSERT(MemoryChunk::FromAddress(old_start) ==
- MemoryChunk::FromAddress(new_start));
-
- // If the mark doesn't move, we don't check the color of the object.
- // It doesn't matter whether the object is black, since it hasn't changed
- // size, so the adjustment to the live data count will be zero anyway.
- if (old_start == new_start) return false;
-
- MarkBit new_mark_bit = MarkBitFrom(new_start);
- MarkBit old_mark_bit = MarkBitFrom(old_start);
-
-#ifdef DEBUG
- ObjectColor old_color = Color(old_mark_bit);
-#endif
-
- if (Marking::IsBlack(old_mark_bit)) {
- old_mark_bit.Clear();
- ASSERT(IsWhite(old_mark_bit));
- Marking::MarkBlack(new_mark_bit);
- return true;
- } else if (Marking::IsGrey(old_mark_bit)) {
- ASSERT(heap_->incremental_marking()->IsMarking());
- old_mark_bit.Clear();
- old_mark_bit.Next().Clear();
- ASSERT(IsWhite(old_mark_bit));
- heap_->incremental_marking()->WhiteToGreyAndPush(
- HeapObject::FromAddress(new_start), new_mark_bit);
- heap_->incremental_marking()->RestartIfNotMarking();
- }
-
-#ifdef DEBUG
- ObjectColor new_color = Color(new_mark_bit);
- ASSERT(new_color == old_color);
-#endif
-
- return false;
-}
-
-
-const char* AllocationSpaceName(AllocationSpace space) {
- switch (space) {
- case NEW_SPACE: return "NEW_SPACE";
- case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
- case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
- case CODE_SPACE: return "CODE_SPACE";
- case MAP_SPACE: return "MAP_SPACE";
- case CELL_SPACE: return "CELL_SPACE";
- case LO_SPACE: return "LO_SPACE";
- default:
- UNREACHABLE();
- }
-
- return NULL;
-}
-
-
-void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
- ASSERT(space->identity() == OLD_POINTER_SPACE ||
- space->identity() == OLD_DATA_SPACE ||
- space->identity() == CODE_SPACE);
-
- PageIterator it(space);
- int count = 0;
- if (it.has_next()) it.next(); // Never compact the first page.
- while (it.has_next()) {
- Page* p = it.next();
- bool evacuate = false;
- if (FLAG_stress_compaction) {
- int counter = space->heap()->ms_count();
- uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
- if ((counter & 1) == (page_number & 1)) evacuate = true;
- } else {
- if (space->IsFragmented(p)) evacuate = true;
- }
- if (evacuate) {
- AddEvacuationCandidate(p);
- count++;
- } else {
- p->ClearEvacuationCandidate();
- }
- }
-
- if (count > 0 && FLAG_trace_fragmentation) {
- PrintF("Collected %d evacuation candidates for space %s\n",
- count,
- AllocationSpaceName(space->identity()));
- }
-}
-
-
-void MarkCompactCollector::AbortCompaction() {
- if (compacting_) {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
- slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
- p->ClearEvacuationCandidate();
- p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
- }
- compacting_ = false;
- evacuation_candidates_.Rewind(0);
- invalidated_code_.Rewind(0);
- }
- ASSERT_EQ(0, evacuation_candidates_.length());
-}
-
-
void MarkCompactCollector::Prepare(GCTracer* tracer) {
- was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
-
- // Disable collection of maps if incremental marking is enabled.
- // Map collection algorithm relies on a special map transition tree traversal
- // order which is not implemented for incremental marking.
- collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
-
// Rather than passing the tracer around we stash it in a static member
// variable.
tracer_ = tracer;
@@ -490,10 +120,16 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
ASSERT(state_ == IDLE);
state_ = PREPARE_GC;
#endif
+ ASSERT(!FLAG_always_compact || !FLAG_never_compact);
- ASSERT(!FLAG_never_compact || !FLAG_always_compact);
+ compacting_collection_ =
+ FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
+ compact_on_next_gc_ = false;
- if (collect_maps_) CreateBackPointers();
+ if (FLAG_never_compact) compacting_collection_ = false;
+ if (!heap()->map_space()->MapPointersEncodable())
+ compacting_collection_ = false;
+ if (FLAG_collect_maps) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit) {
// If GDBJIT interface is active disable compaction.
@@ -501,32 +137,11 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
}
#endif
- // Clear marking bits for precise sweeping to collect all garbage.
- if (was_marked_incrementally_ && PreciseSweepingRequired()) {
- heap()->incremental_marking()->Abort();
- ClearMarkbits(heap_);
- AbortCompaction();
- was_marked_incrementally_ = false;
- }
-
- // Don't start compaction if we are in the middle of incremental
- // marking cycle. We did not collect any slots.
- if (!FLAG_never_compact && !was_marked_incrementally_) {
- StartCompaction();
- }
-
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
- space != NULL;
- space = spaces.next()) {
- space->PrepareForMarkCompact();
- }
-
-#ifdef DEBUG
- if (!was_marked_incrementally_) {
- VerifyMarkbitsAreClean();
+ space != NULL; space = spaces.next()) {
+ space->PrepareForMarkCompact(compacting_collection_);
}
-#endif
#ifdef DEBUG
live_bytes_ = 0;
@@ -553,6 +168,31 @@ void MarkCompactCollector::Finish() {
heap()->isolate()->stub_cache()->Clear();
heap()->external_string_table_.CleanUp();
+
+ // If we've just compacted old space there's no reason to check the
+ // fragmentation limit. Just return.
+ if (HasCompacted()) return;
+
+ // We compact the old generation on the next GC if it has gotten too
+ // fragmented (ie, we could recover an expected amount of space by
+ // reclaiming the waste and free list blocks).
+ static const int kFragmentationLimit = 15; // Percent.
+ static const int kFragmentationAllowed = 1 * MB; // Absolute.
+ intptr_t old_gen_recoverable = 0;
+ intptr_t old_gen_used = 0;
+
+ OldSpaces spaces;
+ for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
+ old_gen_recoverable += space->Waste() + space->AvailableFree();
+ old_gen_used += space->Size();
+ }
+
+ int old_gen_fragmentation =
+ static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
+ if (old_gen_fragmentation > kFragmentationLimit &&
+ old_gen_recoverable > kFragmentationAllowed) {
+ compact_on_next_gc_ = true;
+ }
}
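The hunk above restores the 3.6.4 heuristic that schedules compaction for the next GC: the recoverable old-generation space (waste plus free-list blocks) must exceed both kFragmentationLimit (15%) of the used size and kFragmentationAllowed (1 MB). A standalone sketch of just that check, with hypothetical names and sample numbers outside V8:

// fragmentation_check.cc -- standalone sketch of the compact-on-next-gc
// heuristic restored above (hypothetical names and sample numbers).
#include <cstdint>
#include <cstdio>

const int kFragmentationLimit = 15;                      // percent
const int64_t kFragmentationAllowed = 1 * 1024 * 1024;   // 1 MB, absolute

// Returns true if the old generation looks fragmented enough to be worth
// compacting on the next collection.
bool ShouldCompactNextGC(int64_t old_gen_recoverable, int64_t old_gen_used) {
  if (old_gen_used == 0) return false;
  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  return old_gen_fragmentation > kFragmentationLimit &&
         old_gen_recoverable > kFragmentationAllowed;
}

int main() {
  const int64_t mb = 1024 * 1024;
  // 20 MB recoverable out of 100 MB used: 20% > 15% and 20 MB > 1 MB -> compact.
  std::printf("compact? %d\n", ShouldCompactNextGC(20 * mb, 100 * mb));
  // 0.5 MB recoverable out of 2 MB used: 25% > 15% but below the 1 MB floor.
  std::printf("compact? %d\n", ShouldCompactNextGC(mb / 2, 2 * mb));
  return 0;
}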
@@ -621,21 +261,13 @@ class CodeFlusher {
SharedFunctionInfo* shared = candidate->unchecked_shared();
Code* code = shared->unchecked_code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
+ if (!code->IsMarked()) {
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
} else {
candidate->set_code(shared->unchecked_code());
}
- // We are in the middle of a GC cycle so the write barrier in the code
- // setter did not record the slot update and we have to do that manually.
- Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
- Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
- isolate_->heap()->mark_compact_collector()->
- RecordCodeEntrySlot(slot, target);
-
candidate = next_candidate;
}
@@ -653,8 +285,7 @@ class CodeFlusher {
SetNextCandidate(candidate, NULL);
Code* code = candidate->unchecked_code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
+ if (!code->IsMarked()) {
candidate->set_code(lazy_compile);
}
@@ -724,14 +355,14 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
// except the maps for the object and its possible substrings might be
// marked.
HeapObject* object = HeapObject::cast(*p);
- if (!FLAG_clever_optimizations) return object;
- Map* map = object->map();
- InstanceType type = map->instance_type();
+ MapWord map_word = object->map_word();
+ map_word.ClearMark();
+ InstanceType type = map_word.ToMap()->instance_type();
if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
- Heap* heap = map->GetHeap();
- if (second != heap->empty_string()) {
+ Heap* heap = map_word.ToMap()->heap();
+ if (second != heap->raw_unchecked_empty_string()) {
return object;
}
@@ -773,12 +404,14 @@ class StaticMarkingVisitor : public StaticVisitorBase {
FixedArray::BodyDescriptor,
void>::Visit);
- table_.Register(kVisitGlobalContext, &VisitGlobalContext);
-
table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
+ table_.Register(kVisitGlobalContext,
+ &FixedBodyVisitor<StaticMarkingVisitor,
+ Context::MarkCompactBodyDescriptor,
+ void>::Visit);
+
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
- table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
@@ -823,7 +456,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
INLINE(static void VisitPointer(Heap* heap, Object** p)) {
- MarkObjectByPointer(heap->mark_compact_collector(), p, p);
+ MarkObjectByPointer(heap, p);
}
INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
@@ -833,49 +466,29 @@ class StaticMarkingVisitor : public StaticVisitorBase {
if (VisitUnmarkedObjects(heap, start, end)) return;
// We are close to a stack overflow, so just mark the objects.
}
- MarkCompactCollector* collector = heap->mark_compact_collector();
- for (Object** p = start; p < end; p++) {
- MarkObjectByPointer(collector, start, p);
- }
- }
-
- static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(rinfo->target_cell());
- MarkBit mark = Marking::MarkBitFrom(cell);
- heap->mark_compact_collector()->MarkObject(cell, mark);
- }
-
- static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- // TODO(mstarzinger): We do not short-circuit cons strings here, verify
- // that there can be no such embedded pointers and add assertion here.
- HeapObject* object = HeapObject::cast(rinfo->target_object());
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
- MarkBit mark = Marking::MarkBitFrom(object);
- heap->mark_compact_collector()->MarkObject(object, mark);
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
}
static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) {
+ Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
IC::Clear(rinfo->pc());
// Please note targets for cleared inline caches do not have to be
// marked since they are contained in HEAP->non_monomorphic_cache().
- target = Code::GetCodeFromTargetAddress(rinfo->target_address());
} else {
- if (FLAG_cleanup_code_caches_at_gc &&
- target->kind() == Code::STUB &&
- target->major_key() == CodeStub::CallFunction &&
- target->has_function_cache()) {
- CallFunctionStub::Clear(heap, rinfo->pc());
- }
- MarkBit code_mark = Marking::MarkBitFrom(target);
- heap->mark_compact_collector()->MarkObject(target, code_mark);
+ heap->mark_compact_collector()->MarkObject(code);
+ }
+ }
+
+ static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Object* cell = rinfo->target_cell();
+ Object* old_cell = cell;
+ VisitPointer(heap, &cell);
+ if (cell != old_cell) {
+ rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
}
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
}
static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
@@ -883,21 +496,17 @@ class StaticMarkingVisitor : public StaticVisitorBase {
rinfo->IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
- MarkBit code_mark = Marking::MarkBitFrom(target);
- heap->mark_compact_collector()->MarkObject(target, code_mark);
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+ HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ heap->mark_compact_collector()->MarkObject(code);
}
// Mark object pointed to by p.
- INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
- Object** anchor_slot,
- Object** p)) {
+ INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
if (!(*p)->IsHeapObject()) return;
HeapObject* object = ShortCircuitConsString(p);
- collector->RecordSlot(anchor_slot, p, object);
- MarkBit mark = Marking::MarkBitFrom(object);
- collector->MarkObject(object, mark);
+ if (!object->IsMarked()) {
+ heap->mark_compact_collector()->MarkUnmarkedObject(object);
+ }
}
@@ -906,15 +515,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
HeapObject* obj)) {
#ifdef DEBUG
ASSERT(Isolate::Current()->heap()->Contains(obj));
- ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
+ ASSERT(!obj->IsMarked());
#endif
Map* map = obj->map();
- Heap* heap = obj->GetHeap();
- MarkBit mark = Marking::MarkBitFrom(obj);
- heap->mark_compact_collector()->SetMark(obj, mark);
+ collector->SetMark(obj);
// Mark the map pointer and the body.
- MarkBit map_mark = Marking::MarkBitFrom(map);
- heap->mark_compact_collector()->MarkObject(map, map_mark);
+ if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
IterateBody(map, obj);
}
@@ -930,12 +536,9 @@ class StaticMarkingVisitor : public StaticVisitorBase {
MarkCompactCollector* collector = heap->mark_compact_collector();
// Visit the unmarked objects.
for (Object** p = start; p < end; p++) {
- Object* o = *p;
- if (!o->IsHeapObject()) continue;
- collector->RecordSlot(start, p, o);
- HeapObject* obj = HeapObject::cast(o);
- MarkBit mark = Marking::MarkBitFrom(obj);
- if (mark.Get()) continue;
+ if (!(*p)->IsHeapObject()) continue;
+ HeapObject* obj = HeapObject::cast(*p);
+ if (obj->IsMarked()) continue;
VisitUnmarkedObject(collector, obj);
}
return true;
@@ -964,7 +567,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
void> StructObjectVisitor;
static void VisitJSWeakMap(Map* map, HeapObject* object) {
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+ MarkCompactCollector* collector = map->heap()->mark_compact_collector();
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
// Enqueue weak map in linked list of encountered weak maps.
@@ -975,28 +578,25 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Skip visiting the backing hash table containing the mappings.
int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
- map->GetHeap(),
+ map->heap(),
object,
JSWeakMap::BodyDescriptor::kStartOffset,
JSWeakMap::kTableOffset);
BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
- map->GetHeap(),
+ map->heap(),
object,
JSWeakMap::kTableOffset + kPointerSize,
object_size);
// Mark the backing hash table without pushing it on the marking stack.
- ASSERT(!MarkCompactCollector::IsMarked(weak_map->unchecked_table()));
- ASSERT(MarkCompactCollector::IsMarked(weak_map->unchecked_table()->map()));
-
- HeapObject* unchecked_table = weak_map->unchecked_table();
- MarkBit mark_bit = Marking::MarkBitFrom(unchecked_table);
- collector->SetMark(unchecked_table, mark_bit);
+ ASSERT(!weak_map->unchecked_table()->IsMarked());
+ ASSERT(weak_map->unchecked_table()->map()->IsMarked());
+ collector->SetMark(weak_map->unchecked_table());
}
static void VisitCode(Map* map, HeapObject* object) {
reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
- map->GetHeap());
+ map->heap());
}
// Code flushing support.
@@ -1008,7 +608,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static const int kRegExpCodeThreshold = 5;
inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
- Object* undefined = heap->undefined_value();
+ Object* undefined = heap->raw_unchecked_undefined_value();
return (info->script() != undefined) &&
(reinterpret_cast<Script*>(info->script())->source() != undefined);
}
@@ -1029,9 +629,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
- MarkBit code_mark =
- Marking::MarkBitFrom(function->unchecked_code());
- if (code_mark.Get()) {
+ if (function->unchecked_code()->IsMarked()) {
shared_info->set_code_age(0);
return false;
}
@@ -1047,9 +645,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
- MarkBit code_mark =
- Marking::MarkBitFrom(shared_info->unchecked_code());
- if (code_mark.Get()) {
+ if (shared_info->unchecked_code()->IsMarked()) {
shared_info->set_code_age(0);
return false;
}
@@ -1062,7 +658,11 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// We never flush code for Api functions.
Object* function_data = shared_info->function_data();
- if (function_data->IsFunctionTemplateInfo()) return false;
+ if (function_data->IsHeapObject() &&
+ (SafeMap(function_data)->instance_type() ==
+ FUNCTION_TEMPLATE_INFO_TYPE)) {
+ return false;
+ }
// Only flush code for functions.
if (shared_info->code()->kind() != Code::FUNCTION) return false;
@@ -1095,9 +695,40 @@ class StaticMarkingVisitor : public StaticVisitorBase {
return true;
}
+
+ static inline Map* SafeMap(Object* obj) {
+ MapWord map_word = HeapObject::cast(obj)->map_word();
+ map_word.ClearMark();
+ map_word.ClearOverflow();
+ return map_word.ToMap();
+ }
+
+
+ static inline bool IsJSBuiltinsObject(Object* obj) {
+ return obj->IsHeapObject() &&
+ (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
+ }
+
+
static inline bool IsValidNotBuiltinContext(Object* ctx) {
- return ctx->IsContext() &&
- !Context::cast(ctx)->global()->IsJSBuiltinsObject();
+ if (!ctx->IsHeapObject()) return false;
+
+ Map* map = SafeMap(ctx);
+ Heap* heap = map->heap();
+ if (!(map == heap->raw_unchecked_function_context_map() ||
+ map == heap->raw_unchecked_catch_context_map() ||
+ map == heap->raw_unchecked_with_context_map() ||
+ map == heap->raw_unchecked_global_context_map())) {
+ return false;
+ }
+
+ Context* context = reinterpret_cast<Context*>(ctx);
+
+ if (IsJSBuiltinsObject(context->global())) {
+ return false;
+ }
+
+ return true;
}
@@ -1117,15 +748,13 @@ class StaticMarkingVisitor : public StaticVisitorBase {
bool is_ascii) {
// Make sure that the fixed array is in fact initialized on the RegExp.
// We could potentially trigger a GC when initializing the RegExp.
- if (HeapObject::cast(re->data())->map()->instance_type() !=
- FIXED_ARRAY_TYPE) return;
+ if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return;
// Make sure this is a RegExp that actually contains code.
if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
- if (!code->IsSmi() &&
- HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
+ if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) {
// Save a copy that can be reinstated if we need the code again.
re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
code,
@@ -1161,7 +790,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// If we did not use the code for kRegExpCodeThreshold mark sweep GCs
// we flush the code.
static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
+ Heap* heap = map->heap();
MarkCompactCollector* collector = heap->mark_compact_collector();
if (!collector->is_code_flushing_enabled()) {
VisitJSRegExpFields(map, object);
@@ -1178,7 +807,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void VisitSharedFunctionInfoAndFlushCode(Map* map,
HeapObject* object) {
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+ MarkCompactCollector* collector = map->heap()->mark_compact_collector();
if (!collector->is_code_flushing_enabled()) {
VisitSharedFunctionInfoGeneric(map, object);
return;
@@ -1189,7 +818,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void VisitSharedFunctionInfoAndFlushCodeGeneric(
Map* map, HeapObject* object, bool known_flush_code_candidate) {
- Heap* heap = map->GetHeap();
+ Heap* heap = map->heap();
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
@@ -1206,30 +835,18 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void VisitCodeEntry(Heap* heap, Address entry_address) {
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- MarkBit mark = Marking::MarkBitFrom(code);
- heap->mark_compact_collector()->MarkObject(code, mark);
- heap->mark_compact_collector()->
- RecordCodeEntrySlot(entry_address, code);
- }
-
- static void VisitGlobalContext(Map* map, HeapObject* object) {
- FixedBodyVisitor<StaticMarkingVisitor,
- Context::MarkCompactBodyDescriptor,
- void>::Visit(map, object);
-
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- for (int idx = Context::FIRST_WEAK_SLOT;
- idx < Context::GLOBAL_CONTEXT_SLOTS;
- ++idx) {
- Object** slot =
- HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
- collector->RecordSlot(slot, slot, *slot);
+ Object* code = Code::GetObjectFromEntryAddress(entry_address);
+ Object* old_code = code;
+ VisitPointer(heap, &code);
+ if (code != old_code) {
+ Memory::Address_at(entry_address) =
+ reinterpret_cast<Code*>(code)->entry();
}
}
+
static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
+ Heap* heap = map->heap();
MarkCompactCollector* collector = heap->mark_compact_collector();
if (!collector->is_code_flushing_enabled()) {
VisitJSFunction(map, object);
@@ -1244,9 +861,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
if (!flush_code_candidate) {
- Code* code = jsfunction->unchecked_shared()->unchecked_code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- heap->mark_compact_collector()->MarkObject(code, code_mark);
+ collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
// For optimized functions we should retain both non-optimized version
@@ -1262,11 +877,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
i < count;
i++) {
JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
- Code* inlined_code = inlined->unchecked_shared()->unchecked_code();
- MarkBit inlined_code_mark =
- Marking::MarkBitFrom(inlined_code);
- heap->mark_compact_collector()->MarkObject(
- inlined_code, inlined_code_mark);
+ collector->MarkObject(inlined->unchecked_shared()->unchecked_code());
}
}
}
@@ -1291,11 +902,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static inline void VisitJSFunctionFields(Map* map,
JSFunction* object,
bool flush_code_candidate) {
- Heap* heap = map->GetHeap();
+ Heap* heap = map->heap();
+ MarkCompactCollector* collector = heap->mark_compact_collector();
VisitPointers(heap,
- HeapObject::RawField(object, JSFunction::kPropertiesOffset),
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
+ SLOT_ADDR(object, JSFunction::kPropertiesOffset),
+ SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
if (!flush_code_candidate) {
VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
@@ -1305,39 +917,29 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Visit shared function info to avoid double checking of its
// flushability.
SharedFunctionInfo* shared_info = object->unchecked_shared();
- MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
- if (!shared_info_mark.Get()) {
+ if (!shared_info->IsMarked()) {
Map* shared_info_map = shared_info->map();
- MarkBit shared_info_map_mark =
- Marking::MarkBitFrom(shared_info_map);
- heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
- heap->mark_compact_collector()->MarkObject(shared_info_map,
- shared_info_map_mark);
+ collector->SetMark(shared_info);
+ collector->MarkObject(shared_info_map);
VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
shared_info,
true);
}
}
- VisitPointers(
- heap,
- HeapObject::RawField(object,
- JSFunction::kCodeEntryOffset + kPointerSize),
- HeapObject::RawField(object,
- JSFunction::kNonWeakFieldsEndOffset));
+ VisitPointers(heap,
+ SLOT_ADDR(object,
+ JSFunction::kCodeEntryOffset + kPointerSize),
+ SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
// Don't visit the next function list field as it is a weak reference.
- Object** next_function =
- HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
- heap->mark_compact_collector()->RecordSlot(
- next_function, next_function, *next_function);
}
static inline void VisitJSRegExpFields(Map* map,
HeapObject* object) {
int last_property_offset =
JSRegExp::kSize + kPointerSize * map->inobject_properties();
- VisitPointers(map->GetHeap(),
+ VisitPointers(map->heap(),
SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
SLOT_ADDR(object, last_property_offset));
}
@@ -1393,9 +995,7 @@ class CodeMarkingVisitor : public ThreadVisitor {
void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- Code* code = it.frame()->unchecked_code();
- MarkBit code_bit = Marking::MarkBitFrom(code);
- collector_->MarkObject(it.frame()->unchecked_code(), code_bit);
+ collector_->MarkObject(it.frame()->unchecked_code());
}
}
@@ -1417,10 +1017,8 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
Object* obj = *slot;
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
- MarkBit shared_mark = Marking::MarkBitFrom(shared);
- MarkBit code_mark = Marking::MarkBitFrom(shared->unchecked_code());
- collector_->MarkObject(shared->unchecked_code(), code_mark);
- collector_->MarkObject(shared, shared_mark);
+ collector_->MarkObject(shared->unchecked_code());
+ collector_->MarkObject(shared);
}
}
@@ -1432,8 +1030,7 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
void MarkCompactCollector::PrepareForCodeFlushing() {
ASSERT(heap() == Isolate::Current()->heap());
- // TODO(1609) Currently incremental marker does not support code flushing.
- if (!FLAG_flush_code || was_marked_incrementally_) {
+ if (!FLAG_flush_code) {
EnableCodeFlushing(false);
return;
}
@@ -1445,21 +1042,16 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
return;
}
#endif
-
EnableCodeFlushing(true);
// Ensure that empty descriptor array is marked. Method MarkDescriptorArray
// relies on it being marked before any other descriptor array.
- HeapObject* descriptor_array = heap()->empty_descriptor_array();
- MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
- MarkObject(descriptor_array, descriptor_array_mark);
+ MarkObject(heap()->raw_unchecked_empty_descriptor_array());
// Make sure we are not referencing the code from the stack.
ASSERT(this == heap()->mark_compact_collector());
for (StackFrameIterator it; !it.done(); it.Advance()) {
- Code* code = it.frame()->unchecked_code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- MarkObject(code, code_mark);
+ MarkObject(it.frame()->unchecked_code());
}
// Iterate the archived stacks in all threads to check if
@@ -1472,7 +1064,7 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
- ProcessMarkingDeque();
+ ProcessMarkingStack();
}
@@ -1496,21 +1088,19 @@ class RootMarkingVisitor : public ObjectVisitor {
// Replace flat cons strings in place.
HeapObject* object = ShortCircuitConsString(p);
- MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (mark_bit.Get()) return;
+ if (object->IsMarked()) return;
Map* map = object->map();
// Mark the object.
- collector_->SetMark(object, mark_bit);
+ collector_->SetMark(object);
// Mark the map pointer and body, and push them on the marking stack.
- MarkBit map_mark = Marking::MarkBitFrom(map);
- collector_->MarkObject(map, map_mark);
+ collector_->MarkObject(map);
StaticMarkingVisitor::IterateBody(map, object);
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
- collector_->EmptyMarkingDeque();
+ collector_->EmptyMarkingStack();
}
MarkCompactCollector* collector_;
@@ -1526,19 +1116,17 @@ class SymbolTableCleaner : public ObjectVisitor {
virtual void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
- Object* o = *p;
- if (o->IsHeapObject() &&
- !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
+ if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
// Check if the symbol being pruned is an external symbol. We need to
// delete the associated external data as this symbol is going away.
// Since no objects have yet been moved we can safely access the map of
// the object.
- if (o->IsExternalString()) {
+ if ((*p)->IsExternalString()) {
heap_->FinalizeExternalString(String::cast(*p));
}
// Set the entry to null_value (as deleted).
- *p = heap_->null_value();
+ *p = heap_->raw_unchecked_null_value();
pointers_removed_++;
}
}
@@ -1559,7 +1147,8 @@ class SymbolTableCleaner : public ObjectVisitor {
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
- if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
+ MapWord first_word = HeapObject::cast(object)->map_word();
+ if (first_word.IsMarked()) {
return object;
} else {
return NULL;
@@ -1568,26 +1157,28 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
};
-void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
- ASSERT(IsMarked(object));
+void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
+ ASSERT(!object->IsMarked());
ASSERT(HEAP->Contains(object));
if (object->IsMap()) {
Map* map = Map::cast(object);
if (FLAG_cleanup_code_caches_at_gc) {
map->ClearCodeCache(heap());
}
+ SetMark(map);
// When map collection is enabled we have to mark through map's transitions
// in a special way to make transition links weak.
// Only maps for subclasses of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
MarkMapContents(map);
} else {
- marking_deque_.PushBlack(map);
+ marking_stack_.Push(map);
}
} else {
- marking_deque_.PushBlack(object);
+ SetMark(object);
+ marking_stack_.Push(object);
}
}
@@ -1596,17 +1187,12 @@ void MarkCompactCollector::MarkMapContents(Map* map) {
// Mark prototype transitions array but don't push it into marking stack.
// This will make references from it weak. We will clean dead prototype
// transitions in ClearNonLiveTransitions.
- FixedArray* prototype_transitions = map->prototype_transitions();
- MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
- if (!mark.Get()) {
- mark.Set();
- MemoryChunk::IncrementLiveBytes(prototype_transitions->address(),
- prototype_transitions->Size());
- }
+ FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
+ if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);
- Object** raw_descriptor_array_slot =
- HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
- Object* raw_descriptor_array = *raw_descriptor_array_slot;
+ Object* raw_descriptor_array =
+ *HeapObject::RawField(map,
+ Map::kInstanceDescriptorsOrBitField3Offset);
if (!raw_descriptor_array->IsSmi()) {
MarkDescriptorArray(
reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
@@ -1620,26 +1206,24 @@ void MarkCompactCollector::MarkMapContents(Map* map) {
Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
- StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
+ StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
}
void MarkCompactCollector::MarkDescriptorArray(
DescriptorArray* descriptors) {
- MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
- if (descriptors_mark.Get()) return;
+ if (descriptors->IsMarked()) return;
// Empty descriptor array is marked as a root before any maps are marked.
- ASSERT(descriptors != heap()->empty_descriptor_array());
- SetMark(descriptors, descriptors_mark);
+ ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
+ SetMark(descriptors);
FixedArray* contents = reinterpret_cast<FixedArray*>(
descriptors->get(DescriptorArray::kContentArrayIndex));
ASSERT(contents->IsHeapObject());
- ASSERT(!IsMarked(contents));
+ ASSERT(!contents->IsMarked());
ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
- MarkBit contents_mark = Marking::MarkBitFrom(contents);
- SetMark(contents, contents_mark);
+ SetMark(contents);
// Contents contains (value, details) pairs. If the details say that the type
// of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
// EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
@@ -1649,45 +1233,27 @@ void MarkCompactCollector::MarkDescriptorArray(
// If the pair (value, details) at index i, i+1 is not
// a transition or null descriptor, mark the value.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
-
- Object** slot = contents->data_start() + i;
- Object* value = *slot;
- if (!value->IsHeapObject()) continue;
-
- RecordSlot(slot, slot, *slot);
-
- PropertyType type = details.type();
- if (type < FIRST_PHANTOM_PROPERTY_TYPE) {
- HeapObject* object = HeapObject::cast(value);
- MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
- if (!mark.Get()) {
- SetMark(HeapObject::cast(object), mark);
- marking_deque_.PushBlack(object);
- }
- } else if (type == ELEMENTS_TRANSITION && value->IsFixedArray()) {
- // For maps with multiple elements transitions, the transition maps are
- // stored in a FixedArray. Keep the fixed array alive but not the maps
- // that it refers to.
- HeapObject* object = HeapObject::cast(value);
- MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
- if (!mark.Get()) {
- SetMark(HeapObject::cast(object), mark);
+ if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
+ HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
+ if (object->IsHeapObject() && !object->IsMarked()) {
+ SetMark(object);
+ marking_stack_.Push(object);
}
}
}
// The DescriptorArray descriptors contains a pointer to its contents array,
// but the contents array is already marked.
- marking_deque_.PushBlack(descriptors);
+ marking_stack_.Push(descriptors);
}
void MarkCompactCollector::CreateBackPointers() {
HeapObjectIterator iterator(heap()->map_space());
- for (HeapObject* next_object = iterator.Next();
- next_object != NULL; next_object = iterator.Next()) {
- if (next_object->IsMap()) { // Could also be FreeSpace object on free list.
+ for (HeapObject* next_object = iterator.next();
+ next_object != NULL; next_object = iterator.next()) {
+ if (next_object->IsMap()) { // Could also be ByteArray on free list.
Map* map = Map::cast(next_object);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
map->CreateBackPointers();
} else {
@@ -1698,123 +1264,54 @@ void MarkCompactCollector::CreateBackPointers() {
}
-// Fill the marking stack with overflowed objects returned by the given
-// iterator. Stop when the marking stack is filled or the end of the space
-// is reached, whichever comes first.
-template<class T>
-static void DiscoverGreyObjectsWithIterator(Heap* heap,
- MarkingDeque* marking_deque,
- T* it) {
- // The caller should ensure that the marking stack is initially not full,
- // so that we don't waste effort pointlessly scanning for objects.
- ASSERT(!marking_deque->IsFull());
-
- Map* filler_map = heap->one_pointer_filler_map();
- for (HeapObject* object = it->Next();
- object != NULL;
- object = it->Next()) {
- MarkBit markbit = Marking::MarkBitFrom(object);
- if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
- Marking::GreyToBlack(markbit);
- MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
- marking_deque->PushBlack(object);
- if (marking_deque->IsFull()) return;
- }
- }
-}
-
-
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
-
-
-static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- MarkBit::CellType* cells = p->markbits()->cells();
-
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
-
- int cell_index = Page::kFirstUsedCell;
- Address cell_base = p->ObjectAreaStart();
-
- for (cell_index = Page::kFirstUsedCell;
- cell_index < last_cell_index;
- cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
-
- const MarkBit::CellType current_cell = cells[cell_index];
- if (current_cell == 0) continue;
-
- const MarkBit::CellType next_cell = cells[cell_index + 1];
- MarkBit::CellType grey_objects = current_cell &
- ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
-
- int offset = 0;
- while (grey_objects != 0) {
- int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
- grey_objects >>= trailing_zeros;
- offset += trailing_zeros;
- MarkBit markbit(&cells[cell_index], 1 << offset, false);
- ASSERT(Marking::IsGrey(markbit));
- Marking::GreyToBlack(markbit);
- Address addr = cell_base + offset * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(addr);
- MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
- marking_deque->PushBlack(object);
- if (marking_deque->IsFull()) return;
- offset += 2;
- grey_objects >>= 2;
- }
-
- grey_objects >>= (Bitmap::kBitsPerCell - 1);
- }
+static int OverflowObjectSize(HeapObject* obj) {
+ // Recover the normal map pointer, it might be marked as live and
+ // overflowed.
+ MapWord map_word = obj->map_word();
+ map_word.ClearMark();
+ map_word.ClearOverflow();
+ return obj->SizeFromMap(map_word.ToMap());
}
-static void DiscoverGreyObjectsInSpace(Heap* heap,
- MarkingDeque* marking_deque,
- PagedSpace* space) {
- if (!space->was_swept_conservatively()) {
- HeapObjectIterator it(space);
- DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
- } else {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- DiscoverGreyObjectsOnPage(marking_deque, p);
- if (marking_deque->IsFull()) return;
+class OverflowedObjectsScanner : public AllStatic {
+ public:
+ // Fill the marking stack with overflowed objects returned by the given
+ // iterator. Stop when the marking stack is filled or the end of the space
+ // is reached, whichever comes first.
+ template<class T>
+ static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
+ T* it) {
+ // The caller should ensure that the marking stack is initially not full,
+ // so that we don't waste effort pointlessly scanning for objects.
+ ASSERT(!collector->marking_stack_.is_full());
+
+ for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
+ if (object->IsOverflowed()) {
+ object->ClearOverflow();
+ ASSERT(object->IsMarked());
+ ASSERT(HEAP->Contains(object));
+ collector->marking_stack_.Push(object);
+ if (collector->marking_stack_.is_full()) return;
+ }
}
}
-}
+};
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
- Object* o = *p;
- if (!o->IsHeapObject()) return false;
- HeapObject* heap_object = HeapObject::cast(o);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- return !mark.Get();
+ return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
}
void MarkCompactCollector::MarkSymbolTable() {
- SymbolTable* symbol_table = heap()->symbol_table();
+ SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
// Mark the symbol table itself.
- MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
- SetMark(symbol_table, symbol_table_mark);
+ SetMark(symbol_table);
// Explicitly mark the prefix.
MarkingVisitor marker(heap());
symbol_table->IteratePrefix(&marker);
- ProcessMarkingDeque();
+ ProcessMarkingStack();
}
@@ -1827,9 +1324,9 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
MarkSymbolTable();
// There may be overflowed objects in the heap. Visit them now.
- while (marking_deque_.overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
+ while (marking_stack_.overflowed()) {
+ RefillMarkingStack();
+ EmptyMarkingStack();
}
}
@@ -1847,13 +1344,9 @@ void MarkCompactCollector::MarkObjectGroups() {
bool group_marked = false;
for (size_t j = 0; j < entry->length_; j++) {
Object* object = *objects[j];
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- if (mark.Get()) {
- group_marked = true;
- break;
- }
+ if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
+ group_marked = true;
+ break;
}
}
@@ -1862,21 +1355,17 @@ void MarkCompactCollector::MarkObjectGroups() {
continue;
}
- // An object in the group is marked, so mark as grey all white heap
- // objects in the group.
+ // An object in the group is marked, so mark all heap objects in
+ // the group.
for (size_t j = 0; j < entry->length_; ++j) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- MarkObject(heap_object, mark);
+ if ((*objects[j])->IsHeapObject()) {
+ MarkObject(HeapObject::cast(*objects[j]));
}
}
- // Once the entire group has been colored grey, set the object group
- // to NULL so it won't be processed again.
+ // Once the entire group has been marked, dispose it because it's
+ // not needed anymore.
entry->Dispose();
- object_groups->at(i) = NULL;
}
object_groups->Rewind(last);
}
@@ -1891,7 +1380,7 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
ImplicitRefGroup* entry = ref_groups->at(i);
ASSERT(entry != NULL);
- if (!IsMarked(*entry->parent_)) {
+ if (!(*entry->parent_)->IsMarked()) {
(*ref_groups)[last++] = entry;
continue;
}
@@ -1900,9 +1389,7 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
// A parent object is marked, so mark all child heap objects.
for (size_t j = 0; j < entry->length_; ++j) {
if ((*children[j])->IsHeapObject()) {
- HeapObject* child = HeapObject::cast(*children[j]);
- MarkBit mark = Marking::MarkBitFrom(child);
- MarkObject(child, mark);
+ MarkObject(HeapObject::cast(*children[j]));
}
}
@@ -1918,17 +1405,21 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingDeque() {
- while (!marking_deque_.IsEmpty()) {
- while (!marking_deque_.IsEmpty()) {
- HeapObject* object = marking_deque_.Pop();
+void MarkCompactCollector::EmptyMarkingStack() {
+ while (!marking_stack_.is_empty()) {
+ while (!marking_stack_.is_empty()) {
+ HeapObject* object = marking_stack_.Pop();
ASSERT(object->IsHeapObject());
ASSERT(heap()->Contains(object));
- ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ ASSERT(object->IsMarked());
+ ASSERT(!object->IsOverflowed());
- Map* map = object->map();
- MarkBit map_mark = Marking::MarkBitFrom(map);
- MarkObject(map, map_mark);
+ // Because the object is marked, we have to recover the original map
+ // pointer and use it to mark the object's body.
+ MapWord map_word = object->map_word();
+ map_word.ClearMark();
+ Map* map = map_word.ToMap();
+ MarkObject(map);
StaticMarkingVisitor::IterateBody(map, object);
}
@@ -1945,45 +1436,39 @@ void MarkCompactCollector::EmptyMarkingDeque() {
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the markings stack
// is cleared.
-void MarkCompactCollector::RefillMarkingDeque() {
- ASSERT(marking_deque_.overflowed());
+void MarkCompactCollector::RefillMarkingStack() {
+ ASSERT(marking_stack_.overflowed());
- SemiSpaceIterator new_it(heap()->new_space());
- DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
- if (marking_deque_.IsFull()) return;
+ SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
+ if (marking_stack_.is_full()) return;
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->old_pointer_space());
- if (marking_deque_.IsFull()) return;
+ HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
+ &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
+ if (marking_stack_.is_full()) return;
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->old_data_space());
- if (marking_deque_.IsFull()) return;
+ HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
+ if (marking_stack_.is_full()) return;
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->code_space());
- if (marking_deque_.IsFull()) return;
+ HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
+ if (marking_stack_.is_full()) return;
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->map_space());
- if (marking_deque_.IsFull()) return;
+ HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
+ if (marking_stack_.is_full()) return;
- DiscoverGreyObjectsInSpace(heap(),
- &marking_deque_,
- heap()->cell_space());
- if (marking_deque_.IsFull()) return;
+ HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
+ if (marking_stack_.is_full()) return;
- LargeObjectIterator lo_it(heap()->lo_space());
- DiscoverGreyObjectsWithIterator(heap(),
- &marking_deque_,
- &lo_it);
- if (marking_deque_.IsFull()) return;
+ LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
+ OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
+ if (marking_stack_.is_full()) return;
- marking_deque_.ClearOverflowed();
+ marking_stack_.clear_overflowed();
}
@@ -1991,23 +1476,23 @@ void MarkCompactCollector::RefillMarkingDeque() {
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
-void MarkCompactCollector::ProcessMarkingDeque() {
- EmptyMarkingDeque();
- while (marking_deque_.overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
+void MarkCompactCollector::ProcessMarkingStack() {
+ EmptyMarkingStack();
+ while (marking_stack_.overflowed()) {
+ RefillMarkingStack();
+ EmptyMarkingStack();
}
}
void MarkCompactCollector::ProcessExternalMarking() {
bool work_to_do = true;
- ASSERT(marking_deque_.IsEmpty());
+ ASSERT(marking_stack_.is_empty());
while (work_to_do) {
MarkObjectGroups();
MarkImplicitRefGroups();
- work_to_do = !marking_deque_.IsEmpty();
- ProcessMarkingDeque();
+ work_to_do = !marking_stack_.is_empty();
+ ProcessMarkingStack();
}
}
@@ -2019,43 +1504,16 @@ void MarkCompactCollector::MarkLiveObjects() {
// with the C stack limit check.
PostponeInterruptsScope postpone(heap()->isolate());
- bool incremental_marking_overflowed = false;
- IncrementalMarking* incremental_marking = heap_->incremental_marking();
- if (was_marked_incrementally_) {
- // Finalize the incremental marking and check whether we had an overflow.
- // Both markers use grey color to mark overflowed objects so
- // non-incremental marker can deal with them as if overflow
- // occured during normal marking.
- // But incremental marker uses a separate marking deque
- // so we have to explicitly copy it's overflow state.
- incremental_marking->Finalize();
- incremental_marking_overflowed =
- incremental_marking->marking_deque()->overflowed();
- incremental_marking->marking_deque()->ClearOverflowed();
- } else {
- // Abort any pending incremental activities e.g. incremental sweeping.
- incremental_marking->Abort();
- }
-
#ifdef DEBUG
ASSERT(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;
#endif
- // The to space contains live objects, a page in from space is used as a
- // marking stack.
- Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
- Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
- if (FLAG_force_marking_deque_overflows) {
- marking_deque_end = marking_deque_start + 64 * kPointerSize;
- }
- marking_deque_.Initialize(marking_deque_start,
- marking_deque_end);
- ASSERT(!marking_deque_.overflowed());
+ // The to space contains live objects, the from space is used as a marking
+ // stack.
+ marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
+ heap()->new_space()->FromSpaceHigh());
- if (incremental_marking_overflowed) {
- // There are overflowed objects left in the heap after incremental marking.
- marking_deque_.SetOverflowed();
- }
+ ASSERT(!marking_stack_.overflowed());
PrepareForCodeFlushing();
@@ -2077,20 +1535,15 @@ void MarkCompactCollector::MarkLiveObjects() {
&IsUnmarkedHeapObject);
// Then we mark the objects and process the transitive closure.
heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
- while (marking_deque_.overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
+ while (marking_stack_.overflowed()) {
+ RefillMarkingStack();
+ EmptyMarkingStack();
}
// Repeat host application specific marking to mark unmarked objects
// reachable from the weak roots.
ProcessExternalMarking();
- AfterMarking();
-}
-
-
-void MarkCompactCollector::AfterMarking() {
// Object literal map caches reference symbols (cache keys) and maps
// (cache values). At this point still useful maps have already been
// marked. Mark the keys for the alive values before we process the
@@ -2100,7 +1553,7 @@ void MarkCompactCollector::AfterMarking() {
// Prune the symbol table removing all symbols only pointed to by the
// symbol table. Cannot use symbol_table() here because the symbol
// table is marked.
- SymbolTable* symbol_table = heap()->symbol_table();
+ SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
SymbolTableCleaner v(heap());
symbol_table->IterateElements(&v);
symbol_table->ElementsRemoved(v.PointersRemoved());
@@ -2129,13 +1582,13 @@ void MarkCompactCollector::ProcessMapCaches() {
Object* raw_context = heap()->global_contexts_list_;
while (raw_context != heap()->undefined_value()) {
Context* context = reinterpret_cast<Context*>(raw_context);
- if (IsMarked(context)) {
+ if (context->IsMarked()) {
HeapObject* raw_map_cache =
HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
// A map cache may be reachable from the stack. In this case
// it's already transitively marked and it's too late to clean
// up its parts.
- if (!IsMarked(raw_map_cache) &&
+ if (!raw_map_cache->IsMarked() &&
raw_map_cache != heap()->undefined_value()) {
MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
int existing_elements = map_cache->NumberOfElements();
@@ -2148,7 +1601,8 @@ void MarkCompactCollector::ProcessMapCaches() {
raw_key == heap()->null_value()) continue;
STATIC_ASSERT(MapCache::kEntrySize == 2);
Object* raw_map = map_cache->get(i + 1);
- if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
+ if (raw_map->IsHeapObject() &&
+ HeapObject::cast(raw_map)->IsMarked()) {
++used_elements;
} else {
// Delete useless entries with unmarked maps.
@@ -2164,15 +1618,14 @@ void MarkCompactCollector::ProcessMapCaches() {
// extra complexity during GC. We rely on subsequent cache
// usages (EnsureCapacity) to do this.
map_cache->ElementsRemoved(existing_elements - used_elements);
- MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
- MarkObject(map_cache, map_cache_markbit);
+ MarkObject(map_cache);
}
}
}
// Move to next element in the list.
raw_context = context->get(Context::NEXT_CONTEXT_LINK);
}
- ProcessMarkingDeque();
+ ProcessMarkingStack();
}
@@ -2202,26 +1655,27 @@ void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
#endif // DEBUG
-void MarkCompactCollector::ReattachInitialMaps() {
- HeapObjectIterator map_iterator(heap()->map_space());
- for (HeapObject* obj = map_iterator.Next();
- obj != NULL;
- obj = map_iterator.Next()) {
- if (obj->IsFreeSpace()) continue;
- Map* map = Map::cast(obj);
+void MarkCompactCollector::SweepLargeObjectSpace() {
+#ifdef DEBUG
+ ASSERT(state_ == MARK_LIVE_OBJECTS);
+ state_ =
+ compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
+#endif
+ // Deallocate unmarked objects and clear marked bits for marked objects.
+ heap()->lo_space()->FreeUnmarkedObjects();
+}
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
- if (map->attached_to_shared_function_info()) {
- JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
- }
- }
+// Safe to use during marking phase only.
+bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
+ MapWord metamap = object->map_word();
+ metamap.ClearMark();
+ return metamap.ToMap()->instance_type() == MAP_TYPE;
}
void MarkCompactCollector::ClearNonLiveTransitions() {
- HeapObjectIterator map_iterator(heap()->map_space());
+ HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. At the same time,
// set all the prototype fields of maps back to their original value,
@@ -2232,19 +1686,17 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// scan the descriptor arrays of those maps, not all maps.
// All of these actions are carried out only on maps of JSObjects
// and related subtypes.
- for (HeapObject* obj = map_iterator.Next();
- obj != NULL; obj = map_iterator.Next()) {
+ for (HeapObject* obj = map_iterator.next();
+ obj != NULL; obj = map_iterator.next()) {
Map* map = reinterpret_cast<Map*>(obj);
- MarkBit map_mark = Marking::MarkBitFrom(map);
- if (map->IsFreeSpace()) continue;
+ if (!map->IsMarked() && map->IsByteArray()) continue;
- ASSERT(map->IsMap());
+ ASSERT(SafeIsMap(map));
// Only JSObject and subtypes have map transitions and back pointers.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
- if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
- if (map_mark.Get() &&
- map->attached_to_shared_function_info()) {
+ if (map->IsMarked() && map->attached_to_shared_function_info()) {
// This map is used for inobject slack tracking and has been detached
// from SharedFunctionInfo during the mark phase.
// Since it survived the GC, reattach it now.
@@ -2253,55 +1705,52 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// Clear dead prototype transitions.
int number_of_transitions = map->NumberOfProtoTransitions();
- FixedArray* prototype_transitions = map->prototype_transitions();
-
- int new_number_of_transitions = 0;
- const int header = Map::kProtoTransitionHeaderSize;
- const int proto_offset =
- header + Map::kProtoTransitionPrototypeOffset;
- const int map_offset = header + Map::kProtoTransitionMapOffset;
- const int step = Map::kProtoTransitionElementsPerEntry;
- for (int i = 0; i < number_of_transitions; i++) {
- Object* prototype = prototype_transitions->get(proto_offset + i * step);
- Object* cached_map = prototype_transitions->get(map_offset + i * step);
- if (IsMarked(prototype) && IsMarked(cached_map)) {
- if (new_number_of_transitions != i) {
- prototype_transitions->set_unchecked(
- heap_,
- proto_offset + new_number_of_transitions * step,
- prototype,
- UPDATE_WRITE_BARRIER);
- prototype_transitions->set_unchecked(
- heap_,
- map_offset + new_number_of_transitions * step,
- cached_map,
- SKIP_WRITE_BARRIER);
+ if (number_of_transitions > 0) {
+ FixedArray* prototype_transitions =
+ map->unchecked_prototype_transitions();
+ int new_number_of_transitions = 0;
+ const int header = Map::kProtoTransitionHeaderSize;
+ const int proto_offset =
+ header + Map::kProtoTransitionPrototypeOffset;
+ const int map_offset = header + Map::kProtoTransitionMapOffset;
+ const int step = Map::kProtoTransitionElementsPerEntry;
+ for (int i = 0; i < number_of_transitions; i++) {
+ Object* prototype = prototype_transitions->get(proto_offset + i * step);
+ Object* cached_map = prototype_transitions->get(map_offset + i * step);
+ if (HeapObject::cast(prototype)->IsMarked() &&
+ HeapObject::cast(cached_map)->IsMarked()) {
+ if (new_number_of_transitions != i) {
+ prototype_transitions->set_unchecked(
+ heap_,
+ proto_offset + new_number_of_transitions * step,
+ prototype,
+ UPDATE_WRITE_BARRIER);
+ prototype_transitions->set_unchecked(
+ heap_,
+ map_offset + new_number_of_transitions * step,
+ cached_map,
+ SKIP_WRITE_BARRIER);
+ }
+ new_number_of_transitions++;
}
}
// Fill slots that became free with undefined value.
- Object* undefined = heap()->undefined_value();
+ Object* undefined = heap()->raw_unchecked_undefined_value();
for (int i = new_number_of_transitions * step;
i < number_of_transitions * step;
i++) {
- // The undefined object is on a page that is never compacted and never
- // in new space so it is OK to skip the write barrier. Also it's a
- // root.
prototype_transitions->set_unchecked(heap_,
header + i,
undefined,
SKIP_WRITE_BARRIER);
-
- Object** undefined_slot =
- prototype_transitions->data_start() + i;
- RecordSlot(undefined_slot, undefined_slot, undefined);
}
map->SetNumberOfProtoTransitions(new_number_of_transitions);
}
// Follow the chain of back pointers to find the prototype.
Map* current = map;
- while (current->IsMap()) {
+ while (SafeIsMap(current)) {
current = reinterpret_cast<Map*>(current->prototype());
ASSERT(current->IsHeapObject());
}
@@ -2310,28 +1759,21 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// Follow back pointers, setting them to prototype,
// clearing map transitions when necessary.
current = map;
- bool on_dead_path = !map_mark.Get();
+ bool on_dead_path = !current->IsMarked();
Object* next;
- while (current->IsMap()) {
+ while (SafeIsMap(current)) {
next = current->prototype();
// There should never be a dead map above a live map.
- MarkBit current_mark = Marking::MarkBitFrom(current);
- bool is_alive = current_mark.Get();
- ASSERT(on_dead_path || is_alive);
+ ASSERT(on_dead_path || current->IsMarked());
// A live map above a dead map indicates a dead transition.
// This test will always be false on the first iteration.
- if (on_dead_path && is_alive) {
+ if (on_dead_path && current->IsMarked()) {
on_dead_path = false;
current->ClearNonLiveTransitions(heap(), real_prototype);
}
*HeapObject::RawField(current, Map::kPrototypeOffset) =
real_prototype;
-
- if (is_alive) {
- Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
- RecordSlot(slot, slot, real_prototype);
- }
current = reinterpret_cast<Map*>(next);
}
}
@@ -2341,13 +1783,13 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
void MarkCompactCollector::ProcessWeakMaps() {
Object* weak_map_obj = encountered_weak_maps();
while (weak_map_obj != Smi::FromInt(0)) {
- ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
+ ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
ObjectHashTable* table = weak_map->unchecked_table();
for (int i = 0; i < table->Capacity(); i++) {
- if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
+ if (HeapObject::cast(table->KeyAt(i))->IsMarked()) {
Object* value = table->get(table->EntryToValueIndex(i));
- StaticMarkingVisitor::VisitPointer(heap(), &value);
+ StaticMarkingVisitor::MarkObjectByPointer(heap(), &value);
table->set_unchecked(heap(),
table->EntryToValueIndex(i),
value,
@@ -2362,11 +1804,11 @@ void MarkCompactCollector::ProcessWeakMaps() {
void MarkCompactCollector::ClearWeakMaps() {
Object* weak_map_obj = encountered_weak_maps();
while (weak_map_obj != Smi::FromInt(0)) {
- ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
+ ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
ObjectHashTable* table = weak_map->unchecked_table();
for (int i = 0; i < table->Capacity(); i++) {
- if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
+ if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) {
table->RemoveEntry(i, heap());
}
}
@@ -2376,97 +1818,316 @@ void MarkCompactCollector::ClearWeakMaps() {
set_encountered_weak_maps(Smi::FromInt(0));
}
-
-// We scavenge new space simultaneously with sweeping. This is done in two
-// passes.
+// -------------------------------------------------------------------------
+// Phase 2: Encode forwarding addresses.
+// When compacting, forwarding addresses for objects in old space and map
+// space are encoded in their map pointer word (along with an encoding of
+// their map pointers).
//
-// The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space. Forwarding address is written directly into
-// first word of object without any encoding. If object is dead we write
-// NULL as a forwarding address.
+// The exact encoding is described in the comments for class MapWord in
+// objects.h.
//
-// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead new space objects during traversal of pointers
-// to new space. We should clear them to avoid encountering them during next
-// pointer iteration. This is an issue if the store buffer overflows and we
-// have to scan the entire old space, including dead objects, looking for
-// pointers to new space.
-void MarkCompactCollector::MigrateObject(Address dst,
- Address src,
- int size,
- AllocationSpace dest) {
- HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
- if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
- Address src_slot = src;
- Address dst_slot = dst;
- ASSERT(IsAligned(size, kPointerSize));
-
- for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
- Object* value = Memory::Object_at(src_slot);
-
- Memory::Object_at(dst_slot) = value;
-
- if (heap_->InNewSpace(value)) {
- heap_->store_buffer()->Mark(dst_slot);
- } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_,
- &migration_slots_buffer_,
- reinterpret_cast<Object**>(dst_slot),
- SlotsBuffer::IGNORE_OVERFLOW);
- }
+// An address range [start, end) can have both live and non-live objects.
+// Maximal non-live regions are marked so they can be skipped on subsequent
+// sweeps of the heap. A distinguished map-pointer encoding is used to mark
+// free regions of one-word size (in which case the next word is the start
+// of a live object). A second distinguished map-pointer encoding is used
+// to mark free regions larger than one word, and the size of the free
+// region (including the first word) is written to the second word of the
+// region.
+//
+// Any valid map page offset must lie in the object area of the page, so map
+// page offsets less than Page::kObjectStartOffset are invalid. We use a
+// pair of distinguished invalid map encodings (for single word and multiple
+// words) to indicate free regions in the page found during computation of
+// forwarding addresses and skipped over in subsequent sweeps.
+
+
+// Encode a free region, defined by the given start address and size, in the
+// first word or two of the region.
+void EncodeFreeRegion(Address free_start, int free_size) {
+ ASSERT(free_size >= kIntSize);
+ if (free_size == kIntSize) {
+ Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
+ } else {
+ ASSERT(free_size >= 2 * kIntSize);
+ Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
+ Memory::int_at(free_start + kIntSize) = free_size;
+ }
- src_slot += kPointerSize;
- dst_slot += kPointerSize;
+#ifdef DEBUG
+ // Zap the body of the free region.
+ if (FLAG_enable_slow_asserts) {
+ for (int offset = 2 * kIntSize;
+ offset < free_size;
+ offset += kPointerSize) {
+ Memory::Address_at(free_start + offset) = kZapValue;
}
+ }
+#endif
+}
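
// A minimal sketch of the decoding side of this scheme: given the start of a
// free region, skip to the next potentially live word. It mirrors the logic of
// IterateLiveObjectsInRange further down; SkipFreeRegion is a hypothetical
// helper name used only for illustration, not existing V8 API.
static inline Address SkipFreeRegion(Address current) {
  uint32_t encoded_map = Memory::uint32_at(current);
  if (encoded_map == MarkCompactCollector::kSingleFreeEncoding) {
    // One-word free region; the next word starts a live object.
    return current + kPointerSize;
  }
  ASSERT(encoded_map == MarkCompactCollector::kMultiFreeEncoding);
  // Multi-word free region; its total size is stored in the second word.
  return current + Memory::int_at(current + kIntSize);
}
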
- if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
- Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
- Address code_entry = Memory::Address_at(code_entry_slot);
- if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_,
- &migration_slots_buffer_,
- SlotsBuffer::CODE_ENTRY_SLOT,
- code_entry_slot,
- SlotsBuffer::IGNORE_OVERFLOW);
+// Try to promote all objects in new space. Objects that fit in a paged space
+// are promoted to the old pointer or old data space, as selected by
+// Heap::TargetSpace(); objects that are too large, or whose promotion fails,
+// are reallocated in new space.
+inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
+ HeapObject* object,
+ int object_size) {
+ MaybeObject* forwarded;
+ if (object_size > heap->MaxObjectSizeInPagedSpace()) {
+ forwarded = Failure::Exception();
+ } else {
+ OldSpace* target_space = heap->TargetSpace(object);
+ ASSERT(target_space == heap->old_pointer_space() ||
+ target_space == heap->old_data_space());
+ forwarded = target_space->MCAllocateRaw(object_size);
+ }
+ Object* result;
+ if (!forwarded->ToObject(&result)) {
+ result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
+ }
+ return result;
+}
+
+
+// Allocation functions for the paged spaces call the space's MCAllocateRaw.
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
+ Heap *heap,
+ HeapObject* ignore,
+ int object_size) {
+ return heap->old_pointer_space()->MCAllocateRaw(object_size);
+}
+
+
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
+ Heap* heap,
+ HeapObject* ignore,
+ int object_size) {
+ return heap->old_data_space()->MCAllocateRaw(object_size);
+}
+
+
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
+ Heap* heap,
+ HeapObject* ignore,
+ int object_size) {
+ return heap->code_space()->MCAllocateRaw(object_size);
+}
+
+
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
+ Heap* heap,
+ HeapObject* ignore,
+ int object_size) {
+ return heap->map_space()->MCAllocateRaw(object_size);
+}
+
+
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
+ Heap* heap, HeapObject* ignore, int object_size) {
+ return heap->cell_space()->MCAllocateRaw(object_size);
+}
+
+
+// The forwarding address is encoded at the same offset as the current
+// to-space object, but in from space.
+inline void EncodeForwardingAddressInNewSpace(Heap* heap,
+ HeapObject* old_object,
+ int object_size,
+ Object* new_object,
+ int* ignored) {
+ int offset =
+ heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
+ Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
+ HeapObject::cast(new_object)->address();
+}
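
// A minimal sketch of the corresponding lookup: the forwarding address of a
// to-space object lives in from space at the same offset. This mirrors the
// lookup performed by the updating visitors later in this file;
// NewSpaceForwardingAddress is a hypothetical helper name for illustration.
static inline Address NewSpaceForwardingAddress(Heap* heap, HeapObject* obj) {
  Address slot =
      heap->new_space()->FromSpaceLow() +
      heap->new_space()->ToSpaceOffsetForAddress(obj->address());
  return Memory::Address_at(slot);
}
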
+
+
+// The forwarding address is encoded in the map pointer of the object as an
+// offset (in terms of live bytes) from the address of the first live object
+// in the page.
+inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
+ HeapObject* old_object,
+ int object_size,
+ Object* new_object,
+ int* offset) {
+ // Record the forwarding address of the first live object if necessary.
+ if (*offset == 0) {
+ Page::FromAddress(old_object->address())->mc_first_forwarded =
+ HeapObject::cast(new_object)->address();
+ }
+
+ MapWord encoding =
+ MapWord::EncodeAddress(old_object->map()->address(), *offset);
+ old_object->set_map_word(encoding);
+ *offset += object_size;
+ ASSERT(*offset <= Page::kObjectAreaSize);
+}
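
// A minimal sketch of how such an encoding is resolved in the common case:
// the new location is the page's mc_first_forwarded address plus the live-byte
// offset stored in the map word. MapWord::DecodeOffset() is assumed here, and
// the case where the relocated object spills onto the following page (handled
// by GetForwardingAddressInOldSpace) is ignored.
static inline Address SimplifiedPagedForwardingAddress(HeapObject* obj) {
  Page* p = Page::FromAddress(obj->address());
  int offset = obj->map_word().DecodeOffset();  // Assumed accessor.
  return p->mc_first_forwarded + offset;        // Same-page case only.
}
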
+
+
+// Most non-live objects are ignored.
+inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
+
+
+// Function template that, given a range of addresses (e.g., a semispace or a
+// paged space page), iterates through the objects in the range to clear
+// mark bits and compute and encode forwarding addresses. As a side effect,
+// maximal free chunks are marked so that they can be skipped on subsequent
+// sweeps.
+//
+// The template parameters are an allocation function, a forwarding address
+// encoding function, and a function to process non-live objects.
+template<MarkCompactCollector::AllocationFunction Alloc,
+ MarkCompactCollector::EncodingFunction Encode,
+ MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
+inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
+ Address start,
+ Address end,
+ int* offset) {
+ // The start address of the current free region while sweeping the space.
+ // This address is set when a transition from live to non-live objects is
+ // encountered. A value (an encoding of the 'next free region' pointer)
+ // is written to memory at this address when a transition from non-live to
+ // live objects is encountered.
+ Address free_start = NULL;
+
+ // A flag giving the state of the previously swept object. Initially true
+ // to ensure that free_start is initialized to a proper address before
+ // trying to write to it.
+ bool is_prev_alive = true;
+
+ int object_size; // Will be set on each iteration of the loop.
+ for (Address current = start; current < end; current += object_size) {
+ HeapObject* object = HeapObject::FromAddress(current);
+ if (object->IsMarked()) {
+ object->ClearMark();
+ collector->tracer()->decrement_marked_count();
+ object_size = object->Size();
+
+ Object* forwarded =
+ Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
+ Encode(collector->heap(), object, object_size, forwarded, offset);
+
+#ifdef DEBUG
+ if (FLAG_gc_verbose) {
+ PrintF("forward %p -> %p.\n", object->address(),
+ HeapObject::cast(forwarded)->address());
+ }
+#endif
+ if (!is_prev_alive) { // Transition from non-live to live.
+ EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
+ is_prev_alive = true;
+ }
+ } else { // Non-live object.
+ object_size = object->Size();
+ ProcessNonLive(object, collector->heap()->isolate());
+ if (is_prev_alive) { // Transition from live to non-live.
+ free_start = current;
+ is_prev_alive = false;
}
+ LiveObjectList::ProcessNonLive(object);
}
- } else if (dest == CODE_SPACE) {
- PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
- heap()->MoveBlock(dst, src, size);
- SlotsBuffer::AddTo(&slots_buffer_allocator_,
- &migration_slots_buffer_,
- SlotsBuffer::RELOCATED_CODE_OBJECT,
- dst,
- SlotsBuffer::IGNORE_OVERFLOW);
- Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
+ }
+
+ // If we ended on a free region, mark it.
+ if (!is_prev_alive) {
+ EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
+ }
+}
+
+
+// Functions to encode the forwarding pointers in each compactable space.
+void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
+ int ignored;
+ EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
+ EncodeForwardingAddressInNewSpace,
+ IgnoreNonLiveObject>(
+ this,
+ heap()->new_space()->bottom(),
+ heap()->new_space()->top(),
+ &ignored);
+}
+
+
+template<MarkCompactCollector::AllocationFunction Alloc,
+ MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
+void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
+ PagedSpace* space) {
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+ while (it.has_next()) {
+ Page* p = it.next();
+
+ // The offset of each live object in the page from the first live object
+ // in the page.
+ int offset = 0;
+ EncodeForwardingAddressesInRange<Alloc,
+ EncodeForwardingAddressInPagedSpace,
+ ProcessNonLive>(
+ this,
+ p->ObjectAreaStart(),
+ p->AllocationTop(),
+ &offset);
+ }
+}
+
+
+// We scavenge new space simultaneously with sweeping. This is done in two
+// passes.
+// The first pass migrates all alive objects from one semispace to another or
+// promotes them to old space. The forwarding address is written directly into
+// the first word of the object without any encoding. If the object is dead we
+// write NULL as the forwarding address.
+// The second pass updates pointers to new space in all spaces. It is possible
+// to encounter pointers to dead objects during traversal of dirty regions; we
+// should clear them to avoid encountering them during the next dirty-region
+// iteration.
+static void MigrateObject(Heap* heap,
+ Address dst,
+ Address src,
+ int size,
+ bool to_old_space) {
+ if (to_old_space) {
+ heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
} else {
- ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
- heap()->MoveBlock(dst, src, size);
+ heap->CopyBlock(dst, src, size);
}
+
Memory::Address_at(src) = dst;
}
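
// A minimal sketch of the contract the second pass relies on: the first word
// of a from-space object now holds either its new address or NULL if the
// object was dead. This mirrors UpdatePointerToNewGen below; ForwardedOrNull
// is a hypothetical helper name used only for illustration.
static inline HeapObject* ForwardedOrNull(HeapObject* old_object) {
  Address new_addr = Memory::Address_at(old_object->address());
  return (new_addr == NULL) ? NULL : HeapObject::FromAddress(new_addr);
}
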
+class StaticPointersToNewGenUpdatingVisitor : public
+ StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
+ public:
+ static inline void VisitPointer(Heap* heap, Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+ Address old_addr = obj->address();
+
+ if (heap->new_space()->Contains(obj)) {
+ ASSERT(heap->InFromSpace(*p));
+ *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
+ }
+ }
+};
+
+
// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor: public ObjectVisitor {
+class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
public:
- explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
+ explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
void VisitPointer(Object** p) {
- UpdatePointer(p);
+ StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
}
void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) UpdatePointer(p);
- }
-
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- Object* target = rinfo->target_object();
- VisitPointer(&target);
- rinfo->set_target_object(target);
+ for (Object** p = start; p < end; p++) {
+ StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
+ }
}
void VisitCodeTarget(RelocInfo* rinfo) {
@@ -2486,96 +2147,68 @@ class PointersUpdatingVisitor: public ObjectVisitor {
rinfo->set_call_address(Code::cast(target)->instruction_start());
}
- static inline void UpdateSlot(Heap* heap, Object** slot) {
- Object* obj = *slot;
-
- if (!obj->IsHeapObject()) return;
-
- HeapObject* heap_obj = HeapObject::cast(obj);
-
- MapWord map_word = heap_obj->map_word();
- if (map_word.IsForwardingAddress()) {
- ASSERT(heap->InFromSpace(heap_obj) ||
- MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
- HeapObject* target = map_word.ToForwardingAddress();
- *slot = target;
- ASSERT(!heap->InFromSpace(target) &&
- !MarkCompactCollector::IsOnEvacuationCandidate(target));
- }
- }
-
private:
- inline void UpdatePointer(Object** p) {
- UpdateSlot(heap_, p);
- }
-
Heap* heap_;
};
-static void UpdatePointer(HeapObject** p, HeapObject* object) {
- ASSERT(*p == object);
+// Visitor for updating pointers from live objects in old spaces to new space.
+// It can encounter pointers to dead objects in new space when traversing map
+// space (see comment for MigrateObject).
+static void UpdatePointerToNewGen(HeapObject** p) {
+ if (!(*p)->IsHeapObject()) return;
- Address old_addr = object->address();
+ Address old_addr = (*p)->address();
+ ASSERT(HEAP->InFromSpace(*p));
Address new_addr = Memory::Address_at(old_addr);
- // The new space sweep will overwrite the map word of dead objects
- // with NULL. In this case we do not need to transfer this entry to
- // the store buffer which we are rebuilding.
- if (new_addr != NULL) {
- *p = HeapObject::FromAddress(new_addr);
+ if (new_addr == NULL) {
+ // We encountered a pointer to a dead object. Clear it so we will
+ // not visit it again during the next iteration of dirty regions.
+ *p = NULL;
} else {
- // We have to zap this pointer, because the store buffer may overflow later,
- // and then we have to scan the entire heap and we don't want to find
- // spurious newspace pointers in the old space.
- *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
+ *p = HeapObject::FromAddress(new_addr);
}
}
-static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
- MapWord map_word = HeapObject::cast(*p)->map_word();
-
- if (map_word.IsForwardingAddress()) {
- return String::cast(map_word.ToForwardingAddress());
- }
-
- return String::cast(*p);
+static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+ Object** p) {
+ Address old_addr = HeapObject::cast(*p)->address();
+ Address new_addr = Memory::Address_at(old_addr);
+ return String::cast(HeapObject::FromAddress(new_addr));
}
-bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
- int object_size) {
+static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
Object* result;
- if (object_size > heap()->MaxObjectSizeInPagedSpace()) {
+ if (object_size > heap->MaxObjectSizeInPagedSpace()) {
MaybeObject* maybe_result =
- heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
+ heap->lo_space()->AllocateRawFixedArray(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
- object_size,
- LO_SPACE);
- heap()->mark_compact_collector()->tracer()->
+ MigrateObject(heap, target->address(), object->address(), object_size,
+ true);
+ heap->mark_compact_collector()->tracer()->
increment_promoted_objects_size(object_size);
return true;
}
} else {
- OldSpace* target_space = heap()->TargetSpace(object);
+ OldSpace* target_space = heap->TargetSpace(object);
- ASSERT(target_space == heap()->old_pointer_space() ||
- target_space == heap()->old_data_space());
+ ASSERT(target_space == heap->old_pointer_space() ||
+ target_space == heap->old_data_space());
MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
+ MigrateObject(heap,
+ target->address(),
object->address(),
object_size,
- target_space->identity());
- heap()->mark_compact_collector()->tracer()->
+ target_space == heap->old_pointer_space());
+ heap->mark_compact_collector()->tracer()->
increment_promoted_objects_size(object_size);
return true;
}
@@ -2585,1247 +2218,1145 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
}
-void MarkCompactCollector::EvacuateNewSpace() {
- heap()->CheckNewSpaceExpansionCriteria();
-
- NewSpace* new_space = heap()->new_space();
+static void SweepNewSpace(Heap* heap, NewSpace* space) {
+ heap->CheckNewSpaceExpansionCriteria();
- // Store allocation range before flipping semispaces.
- Address from_bottom = new_space->bottom();
- Address from_top = new_space->top();
+ Address from_bottom = space->bottom();
+ Address from_top = space->top();
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
- new_space->Flip();
- new_space->ResetAllocationInfo();
+ space->Flip();
+ space->ResetAllocationInfo();
+ int size = 0;
int survivors_size = 0;
// First pass: traverse all objects in inactive semispace, remove marks,
- // migrate live objects and write forwarding addresses. This stage puts
- // new entries in the store buffer and may cause some pages to be marked
- // scan-on-scavenge.
- SemiSpaceIterator from_it(from_bottom, from_top);
- for (HeapObject* object = from_it.Next();
- object != NULL;
- object = from_it.Next()) {
- MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (mark_bit.Get()) {
- mark_bit.Clear();
- // Don't bother decrementing live bytes count. We'll discard the
- // entire page at the end.
- int size = object->Size();
+ // migrate live objects and write forwarding addresses.
+ for (Address current = from_bottom; current < from_top; current += size) {
+ HeapObject* object = HeapObject::FromAddress(current);
+
+ if (object->IsMarked()) {
+ object->ClearMark();
+ heap->mark_compact_collector()->tracer()->decrement_marked_count();
+
+ size = object->Size();
survivors_size += size;
// Aggressively promote young survivors to the old space.
- if (TryPromoteObject(object, size)) {
+ if (TryPromoteObject(heap, object, size)) {
continue;
}
// Promotion failed. Just migrate object to another semispace.
- MaybeObject* allocation = new_space->AllocateRaw(size);
- if (allocation->IsFailure()) {
- if (!new_space->AddFreshPage()) {
- // Shouldn't happen. We are sweeping linearly, and to-space
- // has the same number of pages as from-space, so there is
- // always room.
- UNREACHABLE();
- }
- allocation = new_space->AllocateRaw(size);
- ASSERT(!allocation->IsFailure());
- }
- Object* target = allocation->ToObjectUnchecked();
+ // Allocation cannot fail at this point: semispaces are of equal size.
+ Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
- MigrateObject(HeapObject::cast(target)->address(),
- object->address(),
+ MigrateObject(heap,
+ HeapObject::cast(target)->address(),
+ current,
size,
- NEW_SPACE);
+ false);
} else {
// Process the dead object before we write a NULL into its header.
LiveObjectList::ProcessNonLive(object);
- // Mark dead objects in the new space with null in their map field.
- Memory::Address_at(object->address()) = NULL;
+ size = object->Size();
+ Memory::Address_at(current) = NULL;
}
}
- heap_->IncrementYoungSurvivorsCounter(survivors_size);
- new_space->set_age_mark(new_space->top());
-}
+ // Second pass: find pointers to new space and update them.
+ PointersToNewGenUpdatingVisitor updating_visitor(heap);
+ // Update pointers in to space.
+ Address current = space->bottom();
+ while (current < space->top()) {
+ HeapObject* object = HeapObject::FromAddress(current);
+ current +=
+ StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
+ object);
+ }
-void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
- AlwaysAllocateScope always_allocate;
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
- MarkBit::CellType* cells = p->markbits()->cells();
- p->MarkSweptPrecisely();
+ // Update roots.
+ heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+ LiveObjectList::IterateElements(&updating_visitor);
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+ // Update pointers in old spaces.
+ heap->IterateDirtyRegions(heap->old_pointer_space(),
+ &Heap::IteratePointersInDirtyRegion,
+ &UpdatePointerToNewGen,
+ heap->WATERMARK_SHOULD_BE_VALID);
- int cell_index = Page::kFirstUsedCell;
- Address cell_base = p->ObjectAreaStart();
- int offsets[16];
+ heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
- for (cell_index = Page::kFirstUsedCell;
- cell_index < last_cell_index;
- cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
- if (cells[cell_index] == 0) continue;
+ // Update pointers from cells.
+ HeapObjectIterator cell_iterator(heap->cell_space());
+ for (HeapObject* cell = cell_iterator.next();
+ cell != NULL;
+ cell = cell_iterator.next()) {
+ if (cell->IsJSGlobalPropertyCell()) {
+ Address value_address =
+ reinterpret_cast<Address>(cell) +
+ (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
+ updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+ }
+ }
- int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
- for (int i = 0; i < live_objects; i++) {
- Address object_addr = cell_base + offsets[i] * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(object_addr);
- ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ // Update pointer from the global contexts list.
+ updating_visitor.VisitPointer(heap->global_contexts_list_address());
- int size = object->Size();
+ // Update pointers from external string table.
+ heap->UpdateNewSpaceReferencesInExternalStringTable(
+ &UpdateNewSpaceReferenceInExternalStringTableEntry);
- MaybeObject* target = space->AllocateRaw(size);
- if (target->IsFailure()) {
- // OS refused to give us memory.
- V8::FatalProcessOutOfMemory("Evacuation");
- return;
- }
+ // All pointers were updated. Update auxiliary allocation info.
+ heap->IncrementYoungSurvivorsCounter(survivors_size);
+ space->set_age_mark(space->top());
+
+ // Update JSFunction pointers from the runtime profiler.
+ heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+}
- Object* target_object = target->ToObjectUnchecked();
- MigrateObject(HeapObject::cast(target_object)->address(),
- object_addr,
- size,
- space->identity());
- ASSERT(object->map_word().IsForwardingAddress());
- }
+static void SweepSpace(Heap* heap, PagedSpace* space) {
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
- // Clear marking bits for current cell.
- cells[cell_index] = 0;
- }
- p->ResetLiveBytes();
-}
+ // During sweeping of a paged space we try to find the longest sequences
+ // of pages without live objects and free them (instead of putting them on
+ // the free list).
+
+ // Page preceding current.
+ Page* prev = Page::FromAddress(NULL);
+ // First empty page in a sequence.
+ Page* first_empty_page = Page::FromAddress(NULL);
-void MarkCompactCollector::EvacuatePages() {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
- ASSERT(p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- if (p->IsEvacuationCandidate()) {
- // During compaction we might have to request a new page.
- // Check that space still have room for that.
- if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
- EvacuateLiveObjectsFromPage(p);
+ // Page preceding first empty page.
+ Page* prec_first_empty_page = Page::FromAddress(NULL);
+
+ // If the last used page of the space ends with a sequence of dead objects
+ // we can adjust the allocation top instead of putting this free area into
+ // the free list. Thus during sweeping we keep track of such areas
+ // and defer their deallocation until the sweeping of the next page
+ // is done: if one of the next pages contains live objects we have
+ // to put such an area into the free list.
+ Address last_free_start = NULL;
+ int last_free_size = 0;
+
+ while (it.has_next()) {
+ Page* p = it.next();
+
+ bool is_previous_alive = true;
+ Address free_start = NULL;
+ HeapObject* object;
+
+ for (Address current = p->ObjectAreaStart();
+ current < p->AllocationTop();
+ current += object->Size()) {
+ object = HeapObject::FromAddress(current);
+ if (object->IsMarked()) {
+ object->ClearMark();
+ heap->mark_compact_collector()->tracer()->decrement_marked_count();
+
+ if (!is_previous_alive) { // Transition from free to live.
+ space->DeallocateBlock(free_start,
+ static_cast<int>(current - free_start),
+ true);
+ is_previous_alive = true;
+ }
} else {
- // Without room for expansion evacuation is not guaranteed to succeed.
- // Pessimistically abandon unevacuated pages.
- for (int j = i; j < npages; j++) {
- Page* page = evacuation_candidates_[j];
- slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
- page->ClearEvacuationCandidate();
- page->SetFlag(Page::RESCAN_ON_EVACUATION);
+ heap->mark_compact_collector()->ReportDeleteIfNeeded(
+ object, heap->isolate());
+ if (is_previous_alive) { // Transition from live to free.
+ free_start = current;
+ is_previous_alive = false;
}
- return;
+ LiveObjectList::ProcessNonLive(object);
}
+ // The object is now unmarked for the call to Size() at the top of the
+ // loop.
}
- }
-}
+ bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
+ || (!is_previous_alive && free_start == p->ObjectAreaStart());
-class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
- public:
- virtual Object* RetainAs(Object* object) {
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- MapWord map_word = heap_object->map_word();
- if (map_word.IsForwardingAddress()) {
- return map_word.ToForwardingAddress();
+ if (page_is_empty) {
+ // This page is empty. Check whether we are in the middle of a
+ // sequence of empty pages and start one if not.
+ if (!first_empty_page->is_valid()) {
+ first_empty_page = p;
+ prec_first_empty_page = prev;
+ }
+
+ if (!is_previous_alive) {
+ // There are dead objects on this page. Update space accounting stats
+ // without putting anything into the free list.
+ int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
+ if (size_in_bytes > 0) {
+ space->DeallocateBlock(free_start, size_in_bytes, false);
+ }
+ }
+ } else {
+ // This page is not empty. The sequence of empty pages ended on the
+ // previous one.
+ if (first_empty_page->is_valid()) {
+ space->FreePages(prec_first_empty_page, prev);
+ prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
+ }
+
+ // If there is a free ending area on one of the previous pages we have
+ // to deallocate that area and put it on the free list.
+ if (last_free_size > 0) {
+ Page::FromAddress(last_free_start)->
+ SetAllocationWatermark(last_free_start);
+ space->DeallocateBlock(last_free_start, last_free_size, true);
+ last_free_start = NULL;
+ last_free_size = 0;
+ }
+
+ // If the last region of this page was not live we remember it.
+ if (!is_previous_alive) {
+ ASSERT(last_free_size == 0);
+ last_free_size = static_cast<int>(p->AllocationTop() - free_start);
+ last_free_start = free_start;
}
}
- return object;
+
+ prev = p;
}
-};
+ // We reached end of space. See if we need to adjust allocation top.
+ Address new_allocation_top = NULL;
-static inline void UpdateSlot(ObjectVisitor* v,
- SlotsBuffer::SlotType slot_type,
- Address addr) {
- switch (slot_type) {
- case SlotsBuffer::CODE_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
- rinfo.Visit(v);
- break;
- }
- case SlotsBuffer::CODE_ENTRY_SLOT: {
- v->VisitCodeEntry(addr);
- break;
- }
- case SlotsBuffer::RELOCATED_CODE_OBJECT: {
- HeapObject* obj = HeapObject::FromAddress(addr);
- Code::cast(obj)->CodeIterateBody(v);
- break;
- }
- case SlotsBuffer::DEBUG_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
- if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
- break;
- }
- case SlotsBuffer::JS_RETURN_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
- if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
- break;
- }
- case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
- rinfo.Visit(v);
- break;
+ if (first_empty_page->is_valid()) {
+ // The last used pages in the space are empty. We can move the allocation
+ // top backwards to the beginning of the first empty page.
+ ASSERT(prev == space->AllocationTopPage());
+
+ new_allocation_top = first_empty_page->ObjectAreaStart();
+ }
+
+ if (last_free_size > 0) {
+ // There was a free ending area on the previous page.
+ // Deallocate it without putting it into the free list and move the
+ // allocation top to the beginning of this free area.
+ space->DeallocateBlock(last_free_start, last_free_size, false);
+ new_allocation_top = last_free_start;
+ }
+
+ if (new_allocation_top != NULL) {
+#ifdef DEBUG
+ Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
+ if (!first_empty_page->is_valid()) {
+ ASSERT(new_allocation_top_page == space->AllocationTopPage());
+ } else if (last_free_size > 0) {
+ ASSERT(new_allocation_top_page == prec_first_empty_page);
+ } else {
+ ASSERT(new_allocation_top_page == first_empty_page);
}
- default:
- UNREACHABLE();
- break;
+#endif
+
+ space->SetTop(new_allocation_top);
}
}
-enum SweepingMode {
- SWEEP_ONLY,
- SWEEP_AND_VISIT_LIVE_OBJECTS
-};
+void MarkCompactCollector::EncodeForwardingAddresses() {
+ ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
+ // Objects in the active semispace of the young generation may be
+ // relocated to the inactive semispace (if not promoted). Set the
+ // relocation info to the beginning of the inactive semispace.
+ heap()->new_space()->MCResetRelocationInfo();
+
+ // Compute the forwarding pointers in each space.
+ EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
+ ReportDeleteIfNeeded>(
+ heap()->old_pointer_space());
+
+ EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
+ IgnoreNonLiveObject>(
+ heap()->old_data_space());
+
+ EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
+ ReportDeleteIfNeeded>(
+ heap()->code_space());
+
+ EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
+ IgnoreNonLiveObject>(
+ heap()->cell_space());
+
+ // Compute new space next to last after the old and code spaces have been
+ // compacted. Objects in new space can be promoted to old or code space.
+ EncodeForwardingAddressesInNewSpace();
-enum SkipListRebuildingMode {
- REBUILD_SKIP_LIST,
- IGNORE_SKIP_LIST
+ // Compute map space last because computing forwarding addresses
+ // overwrites non-live objects. Objects in the other spaces rely on
+ // non-live map pointers to get the sizes of non-live objects.
+ EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
+ IgnoreNonLiveObject>(
+ heap()->map_space());
+
+ // Write relocation info to the top page, so we can use it later. This is
+ // done after promoting objects from the new space so we get the correct
+ // allocation top.
+ heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
+ heap()->old_data_space()->MCWriteRelocationInfoToPage();
+ heap()->code_space()->MCWriteRelocationInfoToPage();
+ heap()->map_space()->MCWriteRelocationInfoToPage();
+ heap()->cell_space()->MCWriteRelocationInfoToPage();
+}
+
+
+class MapIterator : public HeapObjectIterator {
+ public:
+ explicit MapIterator(Heap* heap)
+ : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
+
+ MapIterator(Heap* heap, Address start)
+ : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
+
+ private:
+ static int SizeCallback(HeapObject* unused) {
+ USE(unused);
+ return Map::kSize;
+ }
};
-// Sweep a space precisely. After this has been done the space can
-// be iterated precisely, hitting only the live objects. Code space
-// is always swept precisely because we want to be able to iterate
-// over it. Map space is swept precisely, because it is not compacted.
-// Slots in live objects pointing into evacuation candidates are updated
-// if requested.
-template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
-static void SweepPrecisely(PagedSpace* space,
- Page* p,
- ObjectVisitor* v) {
- ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
- ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
- space->identity() == CODE_SPACE);
- ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
-
- MarkBit::CellType* cells = p->markbits()->cells();
- p->MarkSweptPrecisely();
-
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
-
- int cell_index = Page::kFirstUsedCell;
- Address free_start = p->ObjectAreaStart();
- ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
- Address object_address = p->ObjectAreaStart();
- int offsets[16];
-
- SkipList* skip_list = p->skip_list();
- int curr_region = -1;
- if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
- skip_list->Clear();
- }
-
- for (cell_index = Page::kFirstUsedCell;
- cell_index < last_cell_index;
- cell_index++, object_address += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(object_address))));
- int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
- int live_index = 0;
- for ( ; live_objects != 0; live_objects--) {
- Address free_end = object_address + offsets[live_index++] * kPointerSize;
- if (free_end != free_start) {
- space->Free(free_start, static_cast<int>(free_end - free_start));
- }
- HeapObject* live_object = HeapObject::FromAddress(free_end);
- ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
- Map* map = live_object->map();
- int size = live_object->SizeFromMap(map);
- if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
- live_object->IterateBody(map->instance_type(), size, v);
- }
- if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
- int new_region_start =
- SkipList::RegionNumber(free_end);
- int new_region_end =
- SkipList::RegionNumber(free_end + size - kPointerSize);
- if (new_region_start != curr_region ||
- new_region_end != curr_region) {
- skip_list->AddObject(free_end, size);
- curr_region = new_region_end;
- }
- }
- free_start = free_end + size;
+class MapCompact {
+ public:
+ explicit MapCompact(Heap* heap, int live_maps)
+ : heap_(heap),
+ live_maps_(live_maps),
+ to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
+ vacant_map_it_(heap),
+ map_to_evacuate_it_(heap, to_evacuate_start_),
+ first_map_to_evacuate_(
+ reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
+ }
+
+ void CompactMaps() {
+ // As we know the number of maps to evacuate beforehand,
+ // we stop when there are no more vacant maps.
+ for (Map* next_vacant_map = NextVacantMap();
+ next_vacant_map;
+ next_vacant_map = NextVacantMap()) {
+ EvacuateMap(next_vacant_map, NextMapToEvacuate());
}
- // Clear marking bits for current cell.
- cells[cell_index] = 0;
+
+#ifdef DEBUG
+ CheckNoMapsToEvacuate();
+#endif
}
- if (free_start != p->ObjectAreaEnd()) {
- space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
+
+ void UpdateMapPointersInRoots() {
+ MapUpdatingVisitor map_updating_visitor;
+ heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
+ heap()->isolate()->global_handles()->IterateWeakRoots(
+ &map_updating_visitor);
+ LiveObjectList::IterateElements(&map_updating_visitor);
}
- p->ResetLiveBytes();
-}
+ void UpdateMapPointersInPagedSpace(PagedSpace* space) {
+ ASSERT(space != heap()->map_space());
-static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
- Page* p = Page::FromAddress(code->address());
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+ while (it.has_next()) {
+ Page* p = it.next();
+ UpdateMapPointersInRange(heap(),
+ p->ObjectAreaStart(),
+ p->AllocationTop());
+ }
+ }
- if (p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- return false;
+ void UpdateMapPointersInNewSpace() {
+ NewSpace* space = heap()->new_space();
+ UpdateMapPointersInRange(heap(), space->bottom(), space->top());
}
- Address code_start = code->address();
- Address code_end = code_start + code->Size();
+ void UpdateMapPointersInLargeObjectSpace() {
+ LargeObjectIterator it(heap()->lo_space());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ UpdateMapPointersInObject(heap(), obj);
+ }
- uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
- uint32_t end_index =
- MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
+ void Finish() {
+ heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
+ }
- Bitmap* b = p->markbits();
+ inline Heap* heap() const { return heap_; }
- MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
- MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
+ private:
+ Heap* heap_;
+ int live_maps_;
+ Address to_evacuate_start_;
+ MapIterator vacant_map_it_;
+ MapIterator map_to_evacuate_it_;
+ Map* first_map_to_evacuate_;
- MarkBit::CellType* start_cell = start_mark_bit.cell();
- MarkBit::CellType* end_cell = end_mark_bit.cell();
+ // Helper class for updating map pointers in HeapObjects.
+ class MapUpdatingVisitor: public ObjectVisitor {
+ public:
+ MapUpdatingVisitor() {}
- if (value) {
- MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
- MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
+ void VisitPointer(Object** p) {
+ UpdateMapPointer(p);
+ }
- if (start_cell == end_cell) {
- *start_cell |= start_mask & end_mask;
- } else {
- *start_cell |= start_mask;
- for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
- *cell = ~0;
- }
- *end_cell |= end_mask;
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) UpdateMapPointer(p);
}
- } else {
- for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
- *cell = 0;
+
+ private:
+ void UpdateMapPointer(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+ HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
+
+ // Moved maps are tagged with an overflowed map word. They are the only
+ // objects whose map word is overflowed, as marking is already complete.
+ MapWord map_word = old_map->map_word();
+ if (!map_word.IsOverflowed()) return;
+
+ *p = GetForwardedMap(map_word);
}
- }
+ };
- return true;
-}
+ static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
+ while (true) {
+ HeapObject* next = it->next();
+ ASSERT(next != NULL);
+ if (next == last)
+ return NULL;
+ ASSERT(!next->IsOverflowed());
+ ASSERT(!next->IsMarked());
+ ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
+ if (next->IsMap() == live)
+ return reinterpret_cast<Map*>(next);
+ }
+ }
+ Map* NextVacantMap() {
+ Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
+ ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
+ return map;
+ }
-static bool IsOnInvalidatedCodeObject(Address addr) {
- // We did not record any slots in large objects thus
- // we can safely go to the page from the slot address.
- Page* p = Page::FromAddress(addr);
+ Map* NextMapToEvacuate() {
+ Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
+ ASSERT(map != NULL);
+ ASSERT(map->IsMap());
+ return map;
+ }
- // First check owner's identity because old pointer and old data spaces
- // are swept lazily and might still have non-zero mark-bits on some
- // pages.
- if (p->owner()->identity() != CODE_SPACE) return false;
+ static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
+ ASSERT(FreeListNode::IsFreeListNode(vacant_map));
+ ASSERT(map_to_evacuate->IsMap());
- // In code space only bits on evacuation candidates (but we don't record
- // any slots on them) and under invalidated code objects are non-zero.
- MarkBit mark_bit =
- p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
+ ASSERT(Map::kSize % 4 == 0);
- return mark_bit.Get();
-}
+ map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
+ vacant_map->address(), map_to_evacuate->address(), Map::kSize);
+ ASSERT(vacant_map->IsMap()); // Due to memcpy above.
-void MarkCompactCollector::InvalidateCode(Code* code) {
- if (heap_->incremental_marking()->IsCompacting() &&
- !ShouldSkipEvacuationSlotRecording(code)) {
- ASSERT(compacting_);
+ MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
+ forwarding_map_word.SetOverflow();
+ map_to_evacuate->set_map_word(forwarding_map_word);
- // If the object is white than no slots were recorded on it yet.
- MarkBit mark_bit = Marking::MarkBitFrom(code);
- if (Marking::IsWhite(mark_bit)) return;
+ ASSERT(map_to_evacuate->map_word().IsOverflowed());
+ ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
+ }
- invalidated_code_.Add(code);
+ static Map* GetForwardedMap(MapWord map_word) {
+ ASSERT(map_word.IsOverflowed());
+ map_word.ClearOverflow();
+ Map* new_map = map_word.ToMap();
+ ASSERT_MAP_ALIGNED(new_map->address());
+ return new_map;
}
-}
+ static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
+ ASSERT(!obj->IsMarked());
+ Map* map = obj->map();
+ ASSERT(heap->map_space()->Contains(map));
+ MapWord map_word = map->map_word();
+ ASSERT(!map_word.IsMarked());
+ if (map_word.IsOverflowed()) {
+ Map* new_map = GetForwardedMap(map_word);
+ ASSERT(heap->map_space()->Contains(new_map));
+ obj->set_map(new_map);
-bool MarkCompactCollector::MarkInvalidatedCode() {
- bool code_marked = false;
+#ifdef DEBUG
+ if (FLAG_gc_verbose) {
+ PrintF("update %p : %p -> %p\n",
+ obj->address(),
+ reinterpret_cast<void*>(map),
+ reinterpret_cast<void*>(new_map));
+ }
+#endif
+ }
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- Code* code = invalidated_code_[i];
+ int size = obj->SizeFromMap(map);
+ MapUpdatingVisitor map_updating_visitor;
+ obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
+ return size;
+ }
- if (SetMarkBitsUnderInvalidatedCode(code, true)) {
- code_marked = true;
+ static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
+ HeapObject* object;
+ int size;
+ for (Address current = start; current < end; current += size) {
+ object = HeapObject::FromAddress(current);
+ size = UpdateMapPointersInObject(heap, object);
+ ASSERT(size > 0);
}
}
- return code_marked;
-}
+#ifdef DEBUG
+ void CheckNoMapsToEvacuate() {
+ if (!FLAG_enable_slow_asserts)
+ return;
+
+ for (HeapObject* obj = map_to_evacuate_it_.next();
+ obj != NULL; obj = map_to_evacuate_it_.next())
+ ASSERT(FreeListNode::IsFreeListNode(obj));
+ }
+#endif
+};
-void MarkCompactCollector::RemoveDeadInvalidatedCode() {
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
+void MarkCompactCollector::SweepSpaces() {
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
+
+ ASSERT(state_ == SWEEP_SPACES);
+ ASSERT(!IsCompacting());
+ // Noncompacting collections simply sweep the spaces to clear the mark
+ // bits and free the nonlive blocks (for old and map spaces). We sweep
+ // the map space last because freeing non-live maps overwrites them and
+ // the other spaces rely on possibly non-live maps to get the sizes for
+ // non-live objects.
+ SweepSpace(heap(), heap()->old_pointer_space());
+ SweepSpace(heap(), heap()->old_data_space());
+ SweepSpace(heap(), heap()->code_space());
+ SweepSpace(heap(), heap()->cell_space());
+ { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
+ SweepNewSpace(heap(), heap()->new_space());
+ }
+ SweepSpace(heap(), heap()->map_space());
+
+ heap()->IterateDirtyRegions(heap()->map_space(),
+ &heap()->IteratePointersInDirtyMapsRegion,
+ &UpdatePointerToNewGen,
+ heap()->WATERMARK_SHOULD_BE_VALID);
+
+ intptr_t live_maps_size = heap()->map_space()->Size();
+ int live_maps = static_cast<int>(live_maps_size / Map::kSize);
+ ASSERT(live_map_objects_size_ == live_maps_size);
+
+ if (heap()->map_space()->NeedsCompaction(live_maps)) {
+ MapCompact map_compact(heap(), live_maps);
+
+ map_compact.CompactMaps();
+ map_compact.UpdateMapPointersInRoots();
+
+ PagedSpaces spaces;
+ for (PagedSpace* space = spaces.next();
+ space != NULL; space = spaces.next()) {
+ if (space == heap()->map_space()) continue;
+ map_compact.UpdateMapPointersInPagedSpace(space);
+ }
+ map_compact.UpdateMapPointersInNewSpace();
+ map_compact.UpdateMapPointersInLargeObjectSpace();
+
+ map_compact.Finish();
}
}
-void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- Code* code = invalidated_code_[i];
- if (code != NULL) {
- code->Iterate(visitor);
- SetMarkBitsUnderInvalidatedCode(code, false);
+// Iterate the live objects in a range of addresses (e.g., a page or a
+// semispace), skipping over the free regions marked with the single- and
+// multi-word free encodings written during the forwarding-address phase.
+// The callback function is used to get the size of each live object.
+int MarkCompactCollector::IterateLiveObjectsInRange(
+ Address start,
+ Address end,
+ LiveObjectCallback size_func) {
+ int live_objects_size = 0;
+ Address current = start;
+ while (current < end) {
+ uint32_t encoded_map = Memory::uint32_at(current);
+ if (encoded_map == kSingleFreeEncoding) {
+ current += kPointerSize;
+ } else if (encoded_map == kMultiFreeEncoding) {
+ current += Memory::int_at(current + kIntSize);
+ } else {
+ int size = (this->*size_func)(HeapObject::FromAddress(current));
+ current += size;
+ live_objects_size += size;
}
}
- invalidated_code_.Rewind(0);
+ return live_objects_size;
}
-void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
- bool code_slots_filtering_required = MarkInvalidatedCode();
+int MarkCompactCollector::IterateLiveObjects(
+ NewSpace* space, LiveObjectCallback size_f) {
+ ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
+ return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
+}
- EvacuateNewSpace();
- EvacuatePages();
- // Second pass: find pointers to new space and update them.
- PointersUpdatingVisitor updating_visitor(heap());
+int MarkCompactCollector::IterateLiveObjects(
+ PagedSpace* space, LiveObjectCallback size_f) {
+ ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
+ int total = 0;
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+ while (it.has_next()) {
+ Page* p = it.next();
+ total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
+ p->AllocationTop(),
+ size_f);
+ }
+ return total;
+}
- // Update pointers in to space.
- SemiSpaceIterator to_it(heap()->new_space()->bottom(),
- heap()->new_space()->top());
- for (HeapObject* object = to_it.Next();
- object != NULL;
- object = to_it.Next()) {
- Map* map = object->map();
- object->IterateBody(map->instance_type(),
- object->SizeFromMap(map),
- &updating_visitor);
+
+// -------------------------------------------------------------------------
+// Phase 3: Update pointers
+
+// Helper class for updating pointers in HeapObjects.
+class UpdatingVisitor: public ObjectVisitor {
+ public:
+ explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
+
+ void VisitPointer(Object** p) {
+ UpdatePointer(p);
}
- // Update roots.
- heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- LiveObjectList::IterateElements(&updating_visitor);
+ void VisitPointers(Object** start, Object** end) {
+ // Update all HeapObject pointers in [start, end)
+ for (Object** p = start; p < end; p++) UpdatePointer(p);
+ }
- {
- StoreBufferRebuildScope scope(heap_,
- heap_->store_buffer(),
- &Heap::ScavengeStoreBufferCallback);
- heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
- }
-
- SlotsBuffer::UpdateSlotsRecordedIn(heap_,
- migration_slots_buffer_,
- code_slots_filtering_required);
- if (FLAG_trace_fragmentation) {
- PrintF(" migration slots buffer: %d\n",
- SlotsBuffer::SizeOfChain(migration_slots_buffer_));
- }
-
- if (compacting_ && was_marked_incrementally_) {
- // It's difficult to filter out slots recorded for large objects.
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- // LargeObjectSpace is not swept yet thus we have to skip
- // dead objects explicitly.
- if (!IsMarked(obj)) continue;
-
- Page* p = Page::FromAddress(obj->address());
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- obj->Iterate(&updating_visitor);
- p->ClearFlag(Page::RESCAN_ON_EVACUATION);
- }
- }
+ void VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VisitPointer(&target);
+ rinfo->set_target_address(
+ reinterpret_cast<Code*>(target)->instruction_start());
+ }
+
+ void VisitDebugTarget(RelocInfo* rinfo) {
+ ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+ rinfo->IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence()));
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ VisitPointer(&target);
+ rinfo->set_call_address(
+ reinterpret_cast<Code*>(target)->instruction_start());
}
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
- ASSERT(p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ inline Heap* heap() const { return heap_; }
- if (p->IsEvacuationCandidate()) {
- SlotsBuffer::UpdateSlotsRecordedIn(heap_,
- p->slots_buffer(),
- code_slots_filtering_required);
- if (FLAG_trace_fragmentation) {
- PrintF(" page %p slots buffer: %d\n",
- reinterpret_cast<void*>(p),
- SlotsBuffer::SizeOfChain(p->slots_buffer()));
+ private:
+ void UpdatePointer(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+ Address old_addr = obj->address();
+ Address new_addr;
+ ASSERT(!heap()->InFromSpace(obj));
+
+ if (heap()->new_space()->Contains(obj)) {
+ Address forwarding_pointer_addr =
+ heap()->new_space()->FromSpaceLow() +
+ heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
+ new_addr = Memory::Address_at(forwarding_pointer_addr);
+
+#ifdef DEBUG
+ ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
+ heap()->old_data_space()->Contains(new_addr) ||
+ heap()->new_space()->FromSpaceContains(new_addr) ||
+ heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
+
+ if (heap()->new_space()->FromSpaceContains(new_addr)) {
+ ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+ heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
}
+#endif
+
+ } else if (heap()->lo_space()->Contains(obj)) {
+ // Don't move objects in the large object space.
+ return;
- // Important: skip list should be cleared only after roots were updated
- // because root iteration traverses the stack and might have to find code
- // objects from non-updated pc pointing into evacuation candidate.
- SkipList* list = p->skip_list();
- if (list != NULL) list->Clear();
} else {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
- reinterpret_cast<intptr_t>(p));
- }
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-
- switch (space->identity()) {
- case OLD_DATA_SPACE:
- SweepConservatively(space, p);
- break;
- case OLD_POINTER_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
- space, p, &updating_visitor);
- break;
- case CODE_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
- space, p, &updating_visitor);
- break;
- default:
- UNREACHABLE();
- break;
+#ifdef DEBUG
+ PagedSpaces spaces;
+ PagedSpace* original_space = spaces.next();
+ while (original_space != NULL) {
+ if (original_space->Contains(obj)) break;
+ original_space = spaces.next();
}
+ ASSERT(original_space != NULL);
+#endif
+ new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
+ ASSERT(original_space->Contains(new_addr));
+ ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
+ original_space->MCSpaceOffsetForAddress(old_addr));
}
- }
- // Update pointers from cells.
- HeapObjectIterator cell_iterator(heap_->cell_space());
- for (HeapObject* cell = cell_iterator.Next();
- cell != NULL;
- cell = cell_iterator.Next()) {
- if (cell->IsJSGlobalPropertyCell()) {
- Address value_address =
- reinterpret_cast<Address>(cell) +
- (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
- updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+ *p = HeapObject::FromAddress(new_addr);
+
+#ifdef DEBUG
+ if (FLAG_gc_verbose) {
+ PrintF("update %p : %p -> %p\n",
+ reinterpret_cast<Address>(p), old_addr, new_addr);
}
+#endif
}
- // Update pointer from the global contexts list.
- updating_visitor.VisitPointer(heap_->global_contexts_list_address());
-
- heap_->symbol_table()->Iterate(&updating_visitor);
+ Heap* heap_;
+};
- // Update pointers from external string table.
- heap_->UpdateReferencesInExternalStringTable(
- &UpdateReferenceInExternalStringTableEntry);
- // Update JSFunction pointers from the runtime profiler.
+void MarkCompactCollector::UpdatePointers() {
+#ifdef DEBUG
+ ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
+ state_ = UPDATE_POINTERS;
+#endif
+ UpdatingVisitor updating_visitor(heap());
heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
&updating_visitor);
+ heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
+ heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
- EvacuationWeakObjectRetainer evacuation_object_retainer;
- heap()->ProcessWeakReferences(&evacuation_object_retainer);
+ // Update the pointer to the head of the weak list of global contexts.
+ updating_visitor.VisitPointer(&heap()->global_contexts_list_);
- // Visit invalidated code (we ignored all slots on it) and clear mark-bits
- // under it.
- ProcessInvalidatedCode(&updating_visitor);
+ LiveObjectList::IterateElements(&updating_visitor);
+ int live_maps_size = IterateLiveObjects(
+ heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_pointer_olds_size = IterateLiveObjects(
+ heap()->old_pointer_space(),
+ &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_data_olds_size = IterateLiveObjects(
+ heap()->old_data_space(),
+ &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_codes_size = IterateLiveObjects(
+ heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_cells_size = IterateLiveObjects(
+ heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+ int live_news_size = IterateLiveObjects(
+ heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
+
+ // Large objects do not move; the map word can be updated directly.
+ LargeObjectIterator it(heap()->lo_space());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ UpdatePointersInNewObject(obj);
+ }
+
+ USE(live_maps_size);
+ USE(live_pointer_olds_size);
+ USE(live_data_olds_size);
+ USE(live_codes_size);
+ USE(live_cells_size);
+ USE(live_news_size);
+ ASSERT(live_maps_size == live_map_objects_size_);
+ ASSERT(live_data_olds_size == live_old_data_objects_size_);
+ ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
+ ASSERT(live_codes_size == live_code_objects_size_);
+ ASSERT(live_cells_size == live_cell_objects_size_);
+ ASSERT(live_news_size == live_young_objects_size_);
+}
+
+
+int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
+ // Keep old map pointers
+ Map* old_map = obj->map();
+ ASSERT(old_map->IsHeapObject());
+
+ Address forwarded = GetForwardingAddressInOldSpace(old_map);
+
+ ASSERT(heap()->map_space()->Contains(old_map));
+ ASSERT(heap()->map_space()->Contains(forwarded));
#ifdef DEBUG
- if (FLAG_verify_heap) {
- VerifyEvacuation(heap_);
+ if (FLAG_gc_verbose) {
+ PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
+ forwarded);
}
#endif
+ // Update the map pointer.
+ obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
- slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
- ASSERT(migration_slots_buffer_ == NULL);
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
- if (!p->IsEvacuationCandidate()) continue;
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
- p->set_scan_on_scavenge(false);
- slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
- p->ClearEvacuationCandidate();
- }
- evacuation_candidates_.Rewind(0);
- compacting_ = false;
+ // We have to compute the object size relying on the old map because
+ // map objects are not relocated yet.
+ int obj_size = obj->SizeFromMap(old_map);
+
+ // Update pointers in the object body.
+ UpdatingVisitor updating_visitor(heap());
+ obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
+ return obj_size;
}
-static const int kStartTableEntriesPerLine = 5;
-static const int kStartTableLines = 171;
-static const int kStartTableInvalidLine = 127;
-static const int kStartTableUnusedEntry = 126;
+int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
+ // Decode the map pointer.
+ MapWord encoding = obj->map_word();
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-#define _ kStartTableUnusedEntry
-#define X kStartTableInvalidLine
-// Mark-bit to object start offset table.
-//
-// The line is indexed by the mark bits in a byte. The first number on
-// the line describes the number of live object starts for the line and the
-// other numbers on the line describe the offsets (in words) of the object
-// starts.
-//
-// Since objects are at least 2 words large we don't have entries for two
-// consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
-char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
- 0, _, _, _, _, // 0
- 1, 0, _, _, _, // 1
- 1, 1, _, _, _, // 2
- X, _, _, _, _, // 3
- 1, 2, _, _, _, // 4
- 2, 0, 2, _, _, // 5
- X, _, _, _, _, // 6
- X, _, _, _, _, // 7
- 1, 3, _, _, _, // 8
- 2, 0, 3, _, _, // 9
- 2, 1, 3, _, _, // 10
- X, _, _, _, _, // 11
- X, _, _, _, _, // 12
- X, _, _, _, _, // 13
- X, _, _, _, _, // 14
- X, _, _, _, _, // 15
- 1, 4, _, _, _, // 16
- 2, 0, 4, _, _, // 17
- 2, 1, 4, _, _, // 18
- X, _, _, _, _, // 19
- 2, 2, 4, _, _, // 20
- 3, 0, 2, 4, _, // 21
- X, _, _, _, _, // 22
- X, _, _, _, _, // 23
- X, _, _, _, _, // 24
- X, _, _, _, _, // 25
- X, _, _, _, _, // 26
- X, _, _, _, _, // 27
- X, _, _, _, _, // 28
- X, _, _, _, _, // 29
- X, _, _, _, _, // 30
- X, _, _, _, _, // 31
- 1, 5, _, _, _, // 32
- 2, 0, 5, _, _, // 33
- 2, 1, 5, _, _, // 34
- X, _, _, _, _, // 35
- 2, 2, 5, _, _, // 36
- 3, 0, 2, 5, _, // 37
- X, _, _, _, _, // 38
- X, _, _, _, _, // 39
- 2, 3, 5, _, _, // 40
- 3, 0, 3, 5, _, // 41
- 3, 1, 3, 5, _, // 42
- X, _, _, _, _, // 43
- X, _, _, _, _, // 44
- X, _, _, _, _, // 45
- X, _, _, _, _, // 46
- X, _, _, _, _, // 47
- X, _, _, _, _, // 48
- X, _, _, _, _, // 49
- X, _, _, _, _, // 50
- X, _, _, _, _, // 51
- X, _, _, _, _, // 52
- X, _, _, _, _, // 53
- X, _, _, _, _, // 54
- X, _, _, _, _, // 55
- X, _, _, _, _, // 56
- X, _, _, _, _, // 57
- X, _, _, _, _, // 58
- X, _, _, _, _, // 59
- X, _, _, _, _, // 60
- X, _, _, _, _, // 61
- X, _, _, _, _, // 62
- X, _, _, _, _, // 63
- 1, 6, _, _, _, // 64
- 2, 0, 6, _, _, // 65
- 2, 1, 6, _, _, // 66
- X, _, _, _, _, // 67
- 2, 2, 6, _, _, // 68
- 3, 0, 2, 6, _, // 69
- X, _, _, _, _, // 70
- X, _, _, _, _, // 71
- 2, 3, 6, _, _, // 72
- 3, 0, 3, 6, _, // 73
- 3, 1, 3, 6, _, // 74
- X, _, _, _, _, // 75
- X, _, _, _, _, // 76
- X, _, _, _, _, // 77
- X, _, _, _, _, // 78
- X, _, _, _, _, // 79
- 2, 4, 6, _, _, // 80
- 3, 0, 4, 6, _, // 81
- 3, 1, 4, 6, _, // 82
- X, _, _, _, _, // 83
- 3, 2, 4, 6, _, // 84
- 4, 0, 2, 4, 6, // 85
- X, _, _, _, _, // 86
- X, _, _, _, _, // 87
- X, _, _, _, _, // 88
- X, _, _, _, _, // 89
- X, _, _, _, _, // 90
- X, _, _, _, _, // 91
- X, _, _, _, _, // 92
- X, _, _, _, _, // 93
- X, _, _, _, _, // 94
- X, _, _, _, _, // 95
- X, _, _, _, _, // 96
- X, _, _, _, _, // 97
- X, _, _, _, _, // 98
- X, _, _, _, _, // 99
- X, _, _, _, _, // 100
- X, _, _, _, _, // 101
- X, _, _, _, _, // 102
- X, _, _, _, _, // 103
- X, _, _, _, _, // 104
- X, _, _, _, _, // 105
- X, _, _, _, _, // 106
- X, _, _, _, _, // 107
- X, _, _, _, _, // 108
- X, _, _, _, _, // 109
- X, _, _, _, _, // 110
- X, _, _, _, _, // 111
- X, _, _, _, _, // 112
- X, _, _, _, _, // 113
- X, _, _, _, _, // 114
- X, _, _, _, _, // 115
- X, _, _, _, _, // 116
- X, _, _, _, _, // 117
- X, _, _, _, _, // 118
- X, _, _, _, _, // 119
- X, _, _, _, _, // 120
- X, _, _, _, _, // 121
- X, _, _, _, _, // 122
- X, _, _, _, _, // 123
- X, _, _, _, _, // 124
- X, _, _, _, _, // 125
- X, _, _, _, _, // 126
- X, _, _, _, _, // 127
- 1, 7, _, _, _, // 128
- 2, 0, 7, _, _, // 129
- 2, 1, 7, _, _, // 130
- X, _, _, _, _, // 131
- 2, 2, 7, _, _, // 132
- 3, 0, 2, 7, _, // 133
- X, _, _, _, _, // 134
- X, _, _, _, _, // 135
- 2, 3, 7, _, _, // 136
- 3, 0, 3, 7, _, // 137
- 3, 1, 3, 7, _, // 138
- X, _, _, _, _, // 139
- X, _, _, _, _, // 140
- X, _, _, _, _, // 141
- X, _, _, _, _, // 142
- X, _, _, _, _, // 143
- 2, 4, 7, _, _, // 144
- 3, 0, 4, 7, _, // 145
- 3, 1, 4, 7, _, // 146
- X, _, _, _, _, // 147
- 3, 2, 4, 7, _, // 148
- 4, 0, 2, 4, 7, // 149
- X, _, _, _, _, // 150
- X, _, _, _, _, // 151
- X, _, _, _, _, // 152
- X, _, _, _, _, // 153
- X, _, _, _, _, // 154
- X, _, _, _, _, // 155
- X, _, _, _, _, // 156
- X, _, _, _, _, // 157
- X, _, _, _, _, // 158
- X, _, _, _, _, // 159
- 2, 5, 7, _, _, // 160
- 3, 0, 5, 7, _, // 161
- 3, 1, 5, 7, _, // 162
- X, _, _, _, _, // 163
- 3, 2, 5, 7, _, // 164
- 4, 0, 2, 5, 7, // 165
- X, _, _, _, _, // 166
- X, _, _, _, _, // 167
- 3, 3, 5, 7, _, // 168
- 4, 0, 3, 5, 7, // 169
- 4, 1, 3, 5, 7 // 170
-};
-#undef _
-#undef X
-
-
-// Takes a word of mark bits. Returns the number of objects that start in the
-// range. Puts the offsets of the words in the supplied array.
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
- int objects = 0;
- int offset = 0;
-
- // No consecutive 1 bits.
- ASSERT((mark_bits & 0x180) != 0x180);
- ASSERT((mark_bits & 0x18000) != 0x18000);
- ASSERT((mark_bits & 0x1800000) != 0x1800000);
-
- while (mark_bits != 0) {
- int byte = (mark_bits & 0xff);
- mark_bits >>= 8;
- if (byte != 0) {
- ASSERT(byte < kStartTableLines); // No consecutive 1 bits.
- char* table = kStartTable + byte * kStartTableEntriesPerLine;
- int objects_in_these_8_words = table[0];
- ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
- ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
- for (int i = 0; i < objects_in_these_8_words; i++) {
- starts[objects++] = offset + table[1 + i];
- }
- }
- offset += 8;
+ // At this point the first word at map_addr is still encoded, so we
+ // cannot cast it to Map* using Map::cast.
+ Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
+ int obj_size = obj->SizeFromMap(map);
+ InstanceType type = map->instance_type();
+
+ // Update map pointer.
+ Address new_map_addr = GetForwardingAddressInOldSpace(map);
+ int offset = encoding.DecodeOffset();
+ obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
+
+#ifdef DEBUG
+ if (FLAG_gc_verbose) {
+ PrintF("update %p : %p -> %p\n", obj->address(),
+ map_addr, new_map_addr);
}
- return objects;
+#endif
+
+ // Update pointers in the object body.
+ UpdatingVisitor updating_visitor(heap());
+ obj->IterateBody(type, obj_size, &updating_visitor);
+ return obj_size;
}
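
The map word decoded above carries two things at once: where the (encoded) map lives and this object's forwarding offset. A compilable sketch of that packing idea follows; the 21-bit map-offset width and the bit layout are purely illustrative assumptions, not V8's actual encoding.

// map_word_sketch.cc -- assumed layout, for illustration only.
#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr int kMapOffsetBits = 21;  // assumed width of the map offset field
constexpr uintptr_t kMapOffsetMask = (uintptr_t{1} << kMapOffsetBits) - 1;

struct SketchMapWord {
  uintptr_t value;

  // Low bits: the map's offset inside the map space.
  // High bits: the object's forwarding offset.
  static SketchMapWord Encode(uintptr_t map_offset, uintptr_t forwarding_offset) {
    assert(map_offset <= kMapOffsetMask);
    return SketchMapWord{(forwarding_offset << kMapOffsetBits) | map_offset};
  }

  uintptr_t DecodeMapOffset() const { return value & kMapOffsetMask; }
  uintptr_t DecodeForwardingOffset() const { return value >> kMapOffsetBits; }
};

int main() {
  SketchMapWord w = SketchMapWord::Encode(/*map_offset=*/0x1f40,
                                          /*forwarding_offset=*/96);
  std::printf("map offset: %#zx, forwarding offset: %zu\n",
              static_cast<size_t>(w.DecodeMapOffset()),
              static_cast<size_t>(w.DecodeForwardingOffset()));
  assert(w.DecodeMapOffset() == 0x1f40);
  assert(w.DecodeForwardingOffset() == 96);
  return 0;
}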
-static inline Address DigestFreeStart(Address approximate_free_start,
- uint32_t free_start_cell) {
- ASSERT(free_start_cell != 0);
+Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
+ // The object should be in either old space or map space.
+ MapWord encoding = obj->map_word();
- // No consecutive 1 bits.
- ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
+ // Offset to the first live object's forwarding address.
+ int offset = encoding.DecodeOffset();
+ Address obj_addr = obj->address();
- int offsets[16];
- uint32_t cell = free_start_cell;
- int offset_of_last_live;
- if ((cell & 0x80000000u) != 0) {
- // This case would overflow below.
- offset_of_last_live = 31;
- } else {
- // Remove all but one bit, the most significant. This is an optimization
- // that may or may not be worthwhile.
- cell |= cell >> 16;
- cell |= cell >> 8;
- cell |= cell >> 4;
- cell |= cell >> 2;
- cell |= cell >> 1;
- cell = (cell + 1) >> 1;
- int live_objects = MarkWordToObjectStarts(cell, offsets);
- ASSERT(live_objects == 1);
- offset_of_last_live = offsets[live_objects - 1];
- }
- Address last_live_start =
- approximate_free_start + offset_of_last_live * kPointerSize;
- HeapObject* last_live = HeapObject::FromAddress(last_live_start);
- Address free_start = last_live_start + last_live->Size();
- return free_start;
-}
-
-
-static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
- ASSERT(cell != 0);
-
- // No consecutive 1 bits.
- ASSERT((cell & (cell << 1)) == 0);
-
- int offsets[16];
- if (cell == 0x80000000u) { // Avoid overflow below.
- return block_address + 31 * kPointerSize;
- }
- uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
- ASSERT((first_set_bit & cell) == first_set_bit);
- int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
- ASSERT(live_objects == 1);
- USE(live_objects);
- return block_address + offsets[0] * kPointerSize;
-}
-
-
-// Sweeps a space conservatively. After this has been done the larger free
-// spaces have been put on the free list and the smaller ones have been
-// ignored and left untouched. A free space is always either ignored or put
-// on the free list, never split up into two parts. This is important
-// because it means that any FreeSpace maps left actually describe a region of
-// memory that can be ignored when scanning. Dead objects other than free
-// spaces will not contain the free space map.
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
- ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
- MarkBit::CellType* cells = p->markbits()->cells();
- p->MarkSweptConservatively();
-
- int last_cell_index =
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
-
- int cell_index = Page::kFirstUsedCell;
- intptr_t freed_bytes = 0;
-
- // This is the start of the 32 word block that we are currently looking at.
- Address block_address = p->ObjectAreaStart();
-
- // Skip over all the dead objects at the start of the page and mark them free.
- for (cell_index = Page::kFirstUsedCell;
- cell_index < last_cell_index;
- cell_index++, block_address += 32 * kPointerSize) {
- if (cells[cell_index] != 0) break;
- }
- size_t size = block_address - p->ObjectAreaStart();
- if (cell_index == last_cell_index) {
- freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
- static_cast<int>(size)));
- ASSERT_EQ(0, p->LiveBytes());
- return freed_bytes;
- }
- // Grow the size of the start-of-page free space a little to get up to the
- // first live object.
- Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
- // Free the first free space.
- size = free_end - p->ObjectAreaStart();
- freed_bytes += space->Free(p->ObjectAreaStart(),
- static_cast<int>(size));
- // The start of the current free area is represented in undigested form by
- // the address of the last 32-word section that contained a live object and
- // the marking bitmap for that cell, which describes where the live object
- // started. Unless we find a large free space in the bitmap we will not
- // digest this pair into a real address. We start the iteration here at the
- // first word in the marking bit map that indicates a live object.
- Address free_start = block_address;
- uint32_t free_start_cell = cells[cell_index];
-
- for ( ;
- cell_index < last_cell_index;
- cell_index++, block_address += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(block_address))));
- uint32_t cell = cells[cell_index];
- if (cell != 0) {
- // We have a live object. Check approximately whether it is more than 32
- // words since the last live object.
- if (block_address - free_start > 32 * kPointerSize) {
- free_start = DigestFreeStart(free_start, free_start_cell);
- if (block_address - free_start > 32 * kPointerSize) {
- // Now that we know the exact start of the free space it still looks
- // like we have a large enough free space to be worth bothering with.
- // so now we need to find the start of the first live object at the
- // end of the free space.
- free_end = StartOfLiveObject(block_address, cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(free_end - free_start));
- }
- }
- // Update our undigested record of where the current free area started.
- free_start = block_address;
- free_start_cell = cell;
- // Clear marking bits for current cell.
- cells[cell_index] = 0;
- }
- }
+ // Find the first live object's forwarding address.
+ Page* p = Page::FromAddress(obj_addr);
+ Address first_forwarded = p->mc_first_forwarded;
+
+ // Find the page that contains the first forwarded address.
+ Page* forwarded_page = Page::FromAddress(first_forwarded);
+ int forwarded_offset = forwarded_page->Offset(first_forwarded);
- // Handle the free space at the end of the page.
- if (block_address - free_start > 32 * kPointerSize) {
- free_start = DigestFreeStart(free_start, free_start_cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(block_address - free_start));
+ // Find end of allocation in the page of first_forwarded.
+ int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
+
+ // Check whether the current object's forwarding pointer is in the same
+ // page as the first live object's forwarding pointer.
+ if (forwarded_offset + offset < mc_top_offset) {
+ // In the same page.
+ return first_forwarded + offset;
}
- p->ResetLiveBytes();
- return freed_bytes;
-}
+ // Must be in the next page. NOTE: this may cross chunks.
+ Page* next_page = forwarded_page->next_page();
+ ASSERT(next_page->is_valid());
+ offset -= (mc_top_offset - forwarded_offset);
+ offset += Page::kObjectStartOffset;
-void MarkCompactCollector::SweepSpace(PagedSpace* space,
- SweeperType sweeper) {
- space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
- sweeper == LAZY_CONSERVATIVE);
+ ASSERT_PAGE_OFFSET(offset);
+ ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
- space->ClearStats();
+ return next_page->OffsetToAddress(offset);
+}
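
The page-crossing case above reduces to plain integer bookkeeping: the forwarding address is the page's first forwarded address plus the recorded offset, and whatever spills past the allocation top continues after the next page's header. A self-contained sketch, with made-up page numbers and offsets (none of the constants are V8's):

// forwarding_offset_sketch.cc -- illustrative arithmetic only.
#include <cassert>
#include <cstdio>

struct Target { int page; int offset; };  // hypothetical (page index, offset)

Target ForwardingTarget(int forwarded_page, int forwarded_offset,
                        int live_offset, int mc_top_offset,
                        int object_start_offset) {
  if (forwarded_offset + live_offset < mc_top_offset) {
    // Fits in the same page as the first forwarded object.
    return {forwarded_page, forwarded_offset + live_offset};
  }
  // Spills into the next page: subtract what fit, skip the page header.
  int remainder = live_offset - (mc_top_offset - forwarded_offset);
  return {forwarded_page + 1, object_start_offset + remainder};
}

int main() {
  // First live object forwards to offset 256 on page 3; allocation top is
  // 8192; the object area starts at offset 32 (all numbers are made up).
  Target same = ForwardingTarget(3, 256, 1024, 8192, 32);
  Target next = ForwardingTarget(3, 256, 9000, 8192, 32);
  std::printf("same page: (%d, %d)\n", same.page, same.offset);  // (3, 1280)
  std::printf("next page: (%d, %d)\n", next.page, next.offset);  // (4, 1096)
  assert(next.offset == 32 + (9000 - (8192 - 256)));
  return 0;
}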
- PageIterator it(space);
- intptr_t freed_bytes = 0;
- intptr_t newspace_size = space->heap()->new_space()->Size();
- bool lazy_sweeping_active = false;
- bool unused_page_present = false;
+// -------------------------------------------------------------------------
+// Phase 4: Relocate objects
- while (it.has_next()) {
- Page* p = it.next();
+void MarkCompactCollector::RelocateObjects() {
+#ifdef DEBUG
+ ASSERT(state_ == UPDATE_POINTERS);
+ state_ = RELOCATE_OBJECTS;
+#endif
+ // Relocate objects, always relocating map objects first; relocating
+ // objects in the other spaces relies on map objects to get object sizes.
+ int live_maps_size = IterateLiveObjects(
+ heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
+ int live_pointer_olds_size = IterateLiveObjects(
+ heap()->old_pointer_space(),
+ &MarkCompactCollector::RelocateOldPointerObject);
+ int live_data_olds_size = IterateLiveObjects(
+ heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
+ int live_codes_size = IterateLiveObjects(
+ heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
+ int live_cells_size = IterateLiveObjects(
+ heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
+ int live_news_size = IterateLiveObjects(
+ heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
+
+ USE(live_maps_size);
+ USE(live_pointer_olds_size);
+ USE(live_data_olds_size);
+ USE(live_codes_size);
+ USE(live_cells_size);
+ USE(live_news_size);
+ ASSERT(live_maps_size == live_map_objects_size_);
+ ASSERT(live_data_olds_size == live_old_data_objects_size_);
+ ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
+ ASSERT(live_codes_size == live_code_objects_size_);
+ ASSERT(live_cells_size == live_cell_objects_size_);
+ ASSERT(live_news_size == live_young_objects_size_);
+
+ // Flip the from and to spaces.
+ heap()->new_space()->Flip();
+
+ heap()->new_space()->MCCommitRelocationInfo();
+
+ // Set the age mark to the bottom of to-space.
+ Address mark = heap()->new_space()->bottom();
+ heap()->new_space()->set_age_mark(mark);
- // Clear sweeping flags indicating that marking bits are still intact.
- p->ClearSweptPrecisely();
- p->ClearSweptConservatively();
+ PagedSpaces spaces;
+ for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+ space->MCCommitRelocationInfo();
- if (p->IsEvacuationCandidate()) {
- ASSERT(evacuation_candidates_.length() > 0);
- continue;
- }
+ heap()->CheckNewSpaceExpansionCriteria();
+ heap()->IncrementYoungSurvivorsCounter(live_news_size);
+}
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- // Will be processed in EvacuateNewSpaceAndCandidates.
- continue;
- }
- if (lazy_sweeping_active) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
- reinterpret_cast<intptr_t>(p));
- }
- continue;
- }
+int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
+ // Recover map pointer.
+ MapWord encoding = obj->map_word();
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
- // One unused page is kept, all further are released before sweeping them.
- if (p->LiveBytes() == 0) {
- if (unused_page_present) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
- reinterpret_cast<intptr_t>(p));
- }
- space->ReleasePage(p);
- continue;
- }
- unused_page_present = true;
- }
+ // Get forwarding address before resetting map pointer
+ Address new_addr = GetForwardingAddressInOldSpace(obj);
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n",
- reinterpret_cast<intptr_t>(p),
- sweeper);
- }
+ // Reset map pointer. The meta map object may not be copied yet so
+ // Map::cast does not yet work.
+ obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
- switch (sweeper) {
- case CONSERVATIVE: {
- SweepConservatively(space, p);
- break;
- }
- case LAZY_CONSERVATIVE: {
- freed_bytes += SweepConservatively(space, p);
- if (freed_bytes >= newspace_size && p != space->LastPage()) {
- space->SetPagesToSweep(p->next_page(), space->anchor());
- lazy_sweeping_active = true;
- }
- break;
- }
- case PRECISE: {
- if (space->identity() == CODE_SPACE) {
- SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
- } else {
- SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
- }
- break;
- }
- default: {
- UNREACHABLE();
- }
- }
+ Address old_addr = obj->address();
+
+ if (new_addr != old_addr) {
+ // Move contents.
+ heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ Map::kSize);
}
- // Give pages that are queued to be freed back to the OS.
- heap()->FreeQueuedChunks();
+#ifdef DEBUG
+ if (FLAG_gc_verbose) {
+ PrintF("relocate %p -> %p\n", old_addr, new_addr);
+ }
+#endif
+
+ return Map::kSize;
}
-void MarkCompactCollector::SweepSpaces() {
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
-#ifdef DEBUG
- state_ = SWEEP_SPACES;
-#endif
- SweeperType how_to_sweep =
- FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
- if (sweep_precisely_) how_to_sweep = PRECISE;
- // Noncompacting collections simply sweep the spaces to clear the mark
- // bits and free the nonlive blocks (for old and map spaces). We sweep
- // the map space last because freeing non-live maps overwrites them and
- // the other spaces rely on possibly non-live maps to get the sizes for
- // non-live objects.
- SweepSpace(heap()->old_pointer_space(), how_to_sweep);
- SweepSpace(heap()->old_data_space(), how_to_sweep);
+static inline int RestoreMap(HeapObject* obj,
+ PagedSpace* space,
+ Address new_addr,
+ Address map_addr) {
+ // This must be a non-map object, and the function relies on the
+ // assumption that the Map space is compacted before the other paged
+ // spaces (see RelocateObjects).
- RemoveDeadInvalidatedCode();
- SweepSpace(heap()->code_space(), PRECISE);
+ // Reset map pointer.
+ obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
- SweepSpace(heap()->cell_space(), PRECISE);
+ int obj_size = obj->Size();
+ ASSERT_OBJECT_SIZE(obj_size);
- { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
- EvacuateNewSpaceAndCandidates();
+ ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
+ space->MCSpaceOffsetForAddress(obj->address()));
+
+#ifdef DEBUG
+ if (FLAG_gc_verbose) {
+ PrintF("relocate %p -> %p\n", obj->address(), new_addr);
}
+#endif
- // ClearNonLiveTransitions depends on precise sweeping of map space to
- // detect whether unmarked map became dead in this collection or in one
- // of the previous ones.
- SweepSpace(heap()->map_space(), PRECISE);
+ return obj_size;
+}
- ASSERT(live_map_objects_size_ <= heap()->map_space()->Size());
- // Deallocate unmarked objects and clear marked bits for marked objects.
- heap_->lo_space()->FreeUnmarkedObjects();
-}
+int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
+ PagedSpace* space) {
+ // Recover map pointer.
+ MapWord encoding = obj->map_word();
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(map_addr));
+ // Get forwarding address before resetting map pointer.
+ Address new_addr = GetForwardingAddressInOldSpace(obj);
-void MarkCompactCollector::EnableCodeFlushing(bool enable) {
- if (enable) {
- if (code_flusher_ != NULL) return;
- code_flusher_ = new CodeFlusher(heap()->isolate());
- } else {
- if (code_flusher_ == NULL) return;
- delete code_flusher_;
- code_flusher_ = NULL;
- }
-}
+ // Reset the map pointer.
+ int obj_size = RestoreMap(obj, space, new_addr, map_addr);
+ Address old_addr = obj->address();
-// TODO(1466) ReportDeleteIfNeeded is not called currently.
-// Our profiling tools do not expect intersections between
-// code objects. We should either reenable it or change our tools.
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
- Isolate* isolate) {
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (obj->IsCode()) {
- GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
+ if (new_addr != old_addr) {
+ // Move contents.
+ if (space == heap()->old_data_space()) {
+ heap()->MoveBlock(new_addr, old_addr, obj_size);
+ } else {
+ heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
+ }
}
-#endif
- if (obj->IsCode()) {
- PROFILE(isolate, CodeDeleteEvent(obj->address()));
+
+ ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
+
+ HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+ if (copied_to->IsSharedFunctionInfo()) {
+ PROFILE(heap()->isolate(),
+ SharedFunctionInfoMoveEvent(old_addr, new_addr));
}
+ HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
+
+ return obj_size;
}
-void MarkCompactCollector::Initialize() {
- StaticMarkingVisitor::Initialize();
+int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
+ return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
}
-bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
- return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
+int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
+ return RelocateOldNonCodeObject(obj, heap()->old_data_space());
}
-bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address,
- SlotType type,
- Address addr,
- AdditionMode mode) {
- SlotsBuffer* buffer = *buffer_address;
- if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
- if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
- allocator->DeallocateChain(buffer_address);
- return false;
- }
- buffer = allocator->AllocateBuffer(buffer);
- *buffer_address = buffer;
- }
- ASSERT(buffer->HasSpaceForTypedSlot());
- buffer->Add(reinterpret_cast<ObjectSlot>(type));
- buffer->Add(reinterpret_cast<ObjectSlot>(addr));
- return true;
+int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
+ return RelocateOldNonCodeObject(obj, heap()->cell_space());
}
-static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
- if (RelocInfo::IsCodeTarget(rmode)) {
- return SlotsBuffer::CODE_TARGET_SLOT;
- } else if (RelocInfo::IsEmbeddedObject(rmode)) {
- return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
- } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
- return SlotsBuffer::DEBUG_TARGET_SLOT;
- } else if (RelocInfo::IsJSReturn(rmode)) {
- return SlotsBuffer::JS_RETURN_SLOT;
- }
- UNREACHABLE();
- return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
-}
+int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
+ // Recover map pointer.
+ MapWord encoding = obj->map_word();
+ Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+ ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ // Get forwarding address before resetting map pointer
+ Address new_addr = GetForwardingAddressInOldSpace(obj);
-void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
- Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
- if (target_page->IsEvacuationCandidate() &&
- (rinfo->host() == NULL ||
- !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- target_page->slots_buffer_address(),
- SlotTypeForRMode(rinfo->rmode()),
- rinfo->pc(),
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
- EvictEvacuationCandidate(target_page);
- }
- }
-}
+ // Reset the map pointer.
+ int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
+ Address old_addr = obj->address();
-void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
- Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
- if (target_page->IsEvacuationCandidate() &&
- !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- target_page->slots_buffer_address(),
- SlotsBuffer::CODE_ENTRY_SLOT,
- slot,
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
- EvictEvacuationCandidate(target_page);
- }
+ if (new_addr != old_addr) {
+ // Move contents.
+ heap()->MoveBlock(new_addr, old_addr, obj_size);
}
-}
+ HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+ if (copied_to->IsCode()) {
+ // May also update inline cache target.
+ Code::cast(copied_to)->Relocate(new_addr - old_addr);
+ // Notify the logger that compiled code has moved.
+ PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
+ }
+ HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-static inline SlotsBuffer::SlotType DecodeSlotType(
- SlotsBuffer::ObjectSlot slot) {
- return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
+ return obj_size;
}
-void SlotsBuffer::UpdateSlots(Heap* heap) {
- PointersUpdatingVisitor v(heap);
+int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
+ int obj_size = obj->Size();
- for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- ObjectSlot slot = slots_[slot_idx];
- if (!IsTypedSlot(slot)) {
- PointersUpdatingVisitor::UpdateSlot(heap, slot);
- } else {
- ++slot_idx;
- ASSERT(slot_idx < idx_);
- UpdateSlot(&v,
- DecodeSlotType(slot),
- reinterpret_cast<Address>(slots_[slot_idx]));
- }
+ // Get forwarding address
+ Address old_addr = obj->address();
+ int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
+
+ Address new_addr =
+ Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
+
+#ifdef DEBUG
+ if (heap()->new_space()->FromSpaceContains(new_addr)) {
+ ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+ heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
+ } else {
+ ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
+ heap()->TargetSpace(obj) == heap()->old_data_space());
+ }
+#endif
+
+ // New and old addresses cannot overlap.
+ if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
+ heap()->CopyBlock(new_addr, old_addr, obj_size);
+ } else {
+ heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
}
-}
+#ifdef DEBUG
+ if (FLAG_gc_verbose) {
+ PrintF("relocate %p -> %p\n", old_addr, new_addr);
+ }
+#endif
-void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
- PointersUpdatingVisitor v(heap);
+ HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+ if (copied_to->IsSharedFunctionInfo()) {
+ PROFILE(heap()->isolate(),
+ SharedFunctionInfoMoveEvent(old_addr, new_addr));
+ }
+ HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
- for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- ObjectSlot slot = slots_[slot_idx];
- if (!IsTypedSlot(slot)) {
- if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
- PointersUpdatingVisitor::UpdateSlot(heap, slot);
- }
- } else {
- ++slot_idx;
- ASSERT(slot_idx < idx_);
- Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
- if (!IsOnInvalidatedCodeObject(pc)) {
- UpdateSlot(&v,
- DecodeSlotType(slot),
- reinterpret_cast<Address>(slots_[slot_idx]));
- }
- }
+ return obj_size;
+}
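
RelocateNewObject's lookup works because an earlier phase stored the forwarding address in the inactive semispace at the same offset the object has in the active semispace. A minimal sketch of that store-and-read round trip, with plain byte buffers standing in for the semispaces (nothing here is a real V8 type):

// semispace_forwarding_sketch.cc -- assumed layout, for illustration only.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

int main() {
  constexpr size_t kSemispaceSize = 1 << 12;
  std::vector<uint8_t> to_space(kSemispaceSize);    // active semispace
  std::vector<uint8_t> from_space(kSemispaceSize);  // inactive semispace

  // A live object sits at offset 0x100 in to-space; the collector has chosen
  // a (hypothetical) relocation target for it.
  size_t to_space_offset = 0x100;
  uintptr_t new_address = 0xDEADBEEF;  // stand-in for the relocation target

  // The encoding phase writes the forwarding address into from-space...
  std::memcpy(from_space.data() + to_space_offset, &new_address,
              sizeof(new_address));

  // ...and the relocation phase reads it back at the same offset.
  uintptr_t forwarded;
  std::memcpy(&forwarded, from_space.data() + to_space_offset,
              sizeof(forwarded));
  assert(forwarded == new_address);
  return 0;
}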
+
+
+void MarkCompactCollector::EnableCodeFlushing(bool enable) {
+ if (enable) {
+ if (code_flusher_ != NULL) return;
+ code_flusher_ = new CodeFlusher(heap()->isolate());
+ } else {
+ if (code_flusher_ == NULL) return;
+ delete code_flusher_;
+ code_flusher_ = NULL;
}
}
-SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
- return new SlotsBuffer(next_buffer);
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
+ Isolate* isolate) {
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (obj->IsCode()) {
+ GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
+ }
+#endif
+ if (obj->IsCode()) {
+ PROFILE(isolate, CodeDeleteEvent(obj->address()));
+ }
}
-void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
- delete buffer;
+int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
+ MapWord map_word = obj->map_word();
+ map_word.ClearMark();
+ return obj->SizeFromMap(map_word.ToMap());
}
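
SizeOfMarkedObject works because the mark is an overloaded bit in the map word: clearing it recovers a map that can size the object even while it is marked. A tiny sketch under the assumption that the mark is the low bit; the real bit position is an implementation detail of the collector.

// mark_bit_sketch.cc -- assumed bit position, for illustration only.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kMarkBit = 1;  // assumed position of the mark bit

uintptr_t SetMark(uintptr_t map_word)   { return map_word | kMarkBit; }
uintptr_t ClearMark(uintptr_t map_word) { return map_word & ~kMarkBit; }

int main() {
  uintptr_t map_pointer = 0x10203040;        // hypothetical aligned map word
  uintptr_t marked = SetMark(map_pointer);
  assert(marked != map_pointer);             // the object now reads as marked
  assert(ClearMark(marked) == map_pointer);  // sizing can use the map again
  return 0;
}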
-void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
- SlotsBuffer* buffer = *buffer_address;
- while (buffer != NULL) {
- SlotsBuffer* next_buffer = buffer->next();
- DeallocateBuffer(buffer);
- buffer = next_buffer;
- }
- *buffer_address = NULL;
+void MarkCompactCollector::Initialize() {
+ StaticPointersToNewGenUpdatingVisitor::Initialize();
+ StaticMarkingVisitor::Initialize();
}
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index d54d822495..9b67c8affe 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,7 +28,6 @@
#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_
-#include "compiler-intrinsics.h"
#include "spaces.h"
namespace v8 {
@@ -46,365 +45,62 @@ class MarkingVisitor;
class RootMarkingVisitor;
-class Marking {
- public:
- explicit Marking(Heap* heap)
- : heap_(heap) {
- }
-
- static inline MarkBit MarkBitFrom(Address addr);
-
- static inline MarkBit MarkBitFrom(HeapObject* obj) {
- return MarkBitFrom(reinterpret_cast<Address>(obj));
- }
-
- // Impossible markbits: 01
- static const char* kImpossibleBitPattern;
- static inline bool IsImpossible(MarkBit mark_bit) {
- ASSERT(strcmp(kImpossibleBitPattern, "01") == 0);
- return !mark_bit.Get() && mark_bit.Next().Get();
- }
-
- // Black markbits: 10 - this is required by the sweeper.
- static const char* kBlackBitPattern;
- static inline bool IsBlack(MarkBit mark_bit) {
- ASSERT(strcmp(kBlackBitPattern, "10") == 0);
- ASSERT(!IsImpossible(mark_bit));
- return mark_bit.Get() && !mark_bit.Next().Get();
- }
-
- // White markbits: 00 - this is required by the mark bit clearer.
- static const char* kWhiteBitPattern;
- static inline bool IsWhite(MarkBit mark_bit) {
- ASSERT(strcmp(kWhiteBitPattern, "00") == 0);
- ASSERT(!IsImpossible(mark_bit));
- return !mark_bit.Get();
- }
-
- // Grey markbits: 11
- static const char* kGreyBitPattern;
- static inline bool IsGrey(MarkBit mark_bit) {
- ASSERT(strcmp(kGreyBitPattern, "11") == 0);
- ASSERT(!IsImpossible(mark_bit));
- return mark_bit.Get() && mark_bit.Next().Get();
- }
-
- static inline void MarkBlack(MarkBit mark_bit) {
- mark_bit.Set();
- mark_bit.Next().Clear();
- ASSERT(Marking::IsBlack(mark_bit));
- }
-
- static inline void BlackToGrey(MarkBit markbit) {
- ASSERT(IsBlack(markbit));
- markbit.Next().Set();
- ASSERT(IsGrey(markbit));
- }
-
- static inline void WhiteToGrey(MarkBit markbit) {
- ASSERT(IsWhite(markbit));
- markbit.Set();
- markbit.Next().Set();
- ASSERT(IsGrey(markbit));
- }
-
- static inline void GreyToBlack(MarkBit markbit) {
- ASSERT(IsGrey(markbit));
- markbit.Next().Clear();
- ASSERT(IsBlack(markbit));
- }
-
- static inline void BlackToGrey(HeapObject* obj) {
- ASSERT(obj->Size() >= 2 * kPointerSize);
- BlackToGrey(MarkBitFrom(obj));
- }
-
- static inline void AnyToGrey(MarkBit markbit) {
- markbit.Set();
- markbit.Next().Set();
- ASSERT(IsGrey(markbit));
- }
-
- // Returns true if the the object whose mark is transferred is marked black.
- bool TransferMark(Address old_start, Address new_start);
-
-#ifdef DEBUG
- enum ObjectColor {
- BLACK_OBJECT,
- WHITE_OBJECT,
- GREY_OBJECT,
- IMPOSSIBLE_COLOR
- };
-
- static const char* ColorName(ObjectColor color) {
- switch (color) {
- case BLACK_OBJECT: return "black";
- case WHITE_OBJECT: return "white";
- case GREY_OBJECT: return "grey";
- case IMPOSSIBLE_COLOR: return "impossible";
- }
- return "error";
- }
-
- static ObjectColor Color(HeapObject* obj) {
- return Color(Marking::MarkBitFrom(obj));
- }
-
- static ObjectColor Color(MarkBit mark_bit) {
- if (IsBlack(mark_bit)) return BLACK_OBJECT;
- if (IsWhite(mark_bit)) return WHITE_OBJECT;
- if (IsGrey(mark_bit)) return GREY_OBJECT;
- UNREACHABLE();
- return IMPOSSIBLE_COLOR;
- }
-#endif
-
- // Returns true if the transferred color is black.
- INLINE(static bool TransferColor(HeapObject* from,
- HeapObject* to)) {
- MarkBit from_mark_bit = MarkBitFrom(from);
- MarkBit to_mark_bit = MarkBitFrom(to);
- bool is_black = false;
- if (from_mark_bit.Get()) {
- to_mark_bit.Set();
- is_black = true; // Looks black so far.
- }
- if (from_mark_bit.Next().Get()) {
- to_mark_bit.Next().Set();
- is_black = false; // Was actually gray.
- }
- ASSERT(Color(from) == Color(to));
- ASSERT(is_black == (Color(to) == BLACK_OBJECT));
- return is_black;
- }
-
- private:
- Heap* heap_;
-};
-
// ----------------------------------------------------------------------------
-// Marking deque for tracing live objects.
+// Marking stack for tracing live objects.
-class MarkingDeque {
+class MarkingStack {
public:
- MarkingDeque()
- : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
+ MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { }
void Initialize(Address low, Address high) {
- HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
- HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
- array_ = obj_low;
- mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
- top_ = bottom_ = 0;
+ top_ = low_ = reinterpret_cast<HeapObject**>(low);
+ high_ = reinterpret_cast<HeapObject**>(high);
overflowed_ = false;
}
- inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
+ bool is_full() const { return top_ >= high_; }
- inline bool IsEmpty() { return top_ == bottom_; }
+ bool is_empty() const { return top_ <= low_; }
bool overflowed() const { return overflowed_; }
- void ClearOverflowed() { overflowed_ = false; }
-
- void SetOverflowed() { overflowed_ = true; }
+ void clear_overflowed() { overflowed_ = false; }
// Push the (marked) object on the marking stack if there is room,
// otherwise mark the object as overflowed and wait for a rescan of the
// heap.
- inline void PushBlack(HeapObject* object) {
- ASSERT(object->IsHeapObject());
- if (IsFull()) {
- Marking::BlackToGrey(object);
- MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
- SetOverflowed();
+ void Push(HeapObject* object) {
+ CHECK(object->IsHeapObject());
+ if (is_full()) {
+ object->SetOverflow();
+ overflowed_ = true;
} else {
- array_[top_] = object;
- top_ = ((top_ + 1) & mask_);
+ *(top_++) = object;
}
}
- inline void PushGrey(HeapObject* object) {
- ASSERT(object->IsHeapObject());
- if (IsFull()) {
- ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
- SetOverflowed();
- } else {
- array_[top_] = object;
- top_ = ((top_ + 1) & mask_);
- }
- }
-
- inline HeapObject* Pop() {
- ASSERT(!IsEmpty());
- top_ = ((top_ - 1) & mask_);
- HeapObject* object = array_[top_];
- ASSERT(object->IsHeapObject());
+ HeapObject* Pop() {
+ ASSERT(!is_empty());
+ HeapObject* object = *(--top_);
+ CHECK(object->IsHeapObject());
return object;
}
- inline void UnshiftGrey(HeapObject* object) {
- ASSERT(object->IsHeapObject());
- if (IsFull()) {
- ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
- SetOverflowed();
- } else {
- bottom_ = ((bottom_ - 1) & mask_);
- array_[bottom_] = object;
- }
- }
-
- HeapObject** array() { return array_; }
- int bottom() { return bottom_; }
- int top() { return top_; }
- int mask() { return mask_; }
- void set_top(int top) { top_ = top; }
-
private:
- HeapObject** array_;
- // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
- // empty when top_ == bottom_. It is full when top_ + 1 == bottom
- // (mod mask + 1).
- int top_;
- int bottom_;
- int mask_;
+ HeapObject** low_;
+ HeapObject** top_;
+ HeapObject** high_;
bool overflowed_;
- DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
-};
-
-
-class SlotsBufferAllocator {
- public:
- SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
- void DeallocateBuffer(SlotsBuffer* buffer);
-
- void DeallocateChain(SlotsBuffer** buffer_address);
-};
-
-
-// SlotsBuffer records a sequence of slots that has to be updated
-// after live objects were relocated from evacuation candidates.
-// All slots are either untyped or typed:
-// - Untyped slots are expected to contain a tagged object pointer.
-// They are recorded by an address.
-// - Typed slots are expected to contain an encoded pointer to a heap
-// object where the way of encoding depends on the type of the slot.
-// They are recorded as a pair (SlotType, slot address).
-// We assume that zero-page is never mapped this allows us to distinguish
-// untyped slots from typed slots during iteration by a simple comparison:
-// if element of slots buffer is less than NUMBER_OF_SLOT_TYPES then it
-// is the first element of typed slot's pair.
-class SlotsBuffer {
- public:
- typedef Object** ObjectSlot;
-
- explicit SlotsBuffer(SlotsBuffer* next_buffer)
- : idx_(0), chain_length_(1), next_(next_buffer) {
- if (next_ != NULL) {
- chain_length_ = next_->chain_length_ + 1;
- }
- }
-
- ~SlotsBuffer() {
- }
-
- void Add(ObjectSlot slot) {
- ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
- slots_[idx_++] = slot;
- }
-
- enum SlotType {
- EMBEDDED_OBJECT_SLOT,
- RELOCATED_CODE_OBJECT,
- CODE_TARGET_SLOT,
- CODE_ENTRY_SLOT,
- DEBUG_TARGET_SLOT,
- JS_RETURN_SLOT,
- NUMBER_OF_SLOT_TYPES
- };
-
- void UpdateSlots(Heap* heap);
-
- void UpdateSlotsWithFilter(Heap* heap);
-
- SlotsBuffer* next() { return next_; }
-
- static int SizeOfChain(SlotsBuffer* buffer) {
- if (buffer == NULL) return 0;
- return static_cast<int>(buffer->idx_ +
- (buffer->chain_length_ - 1) * kNumberOfElements);
- }
-
- inline bool IsFull() {
- return idx_ == kNumberOfElements;
- }
-
- inline bool HasSpaceForTypedSlot() {
- return idx_ < kNumberOfElements - 1;
- }
-
- static void UpdateSlotsRecordedIn(Heap* heap,
- SlotsBuffer* buffer,
- bool code_slots_filtering_required) {
- while (buffer != NULL) {
- if (code_slots_filtering_required) {
- buffer->UpdateSlotsWithFilter(heap);
- } else {
- buffer->UpdateSlots(heap);
- }
- buffer = buffer->next();
- }
- }
-
- enum AdditionMode {
- FAIL_ON_OVERFLOW,
- IGNORE_OVERFLOW
- };
-
- static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
- return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
- }
-
- static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address,
- ObjectSlot slot,
- AdditionMode mode) {
- SlotsBuffer* buffer = *buffer_address;
- if (buffer == NULL || buffer->IsFull()) {
- if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
- allocator->DeallocateChain(buffer_address);
- return false;
- }
- buffer = allocator->AllocateBuffer(buffer);
- *buffer_address = buffer;
- }
- buffer->Add(slot);
- return true;
- }
-
- static bool IsTypedSlot(ObjectSlot slot);
-
- static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address,
- SlotType type,
- Address addr,
- AdditionMode mode);
-
- static const int kNumberOfElements = 1021;
-
- private:
- static const int kChainLengthThreshold = 6;
-
- intptr_t idx_;
- intptr_t chain_length_;
- SlotsBuffer* next_;
- ObjectSlot slots_[kNumberOfElements];
+ DISALLOW_COPY_AND_ASSIGN(MarkingStack);
};
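
The MarkingStack above trades unbounded growth for an overflow flag: a push that does not fit leaves the stack untouched and instead flags both the object and the stack as overflowed, so a later rescan of the heap can pick the object up. A standalone analogue with stand-in types (FakeObject replaces HeapObject/SetOverflow):

// bounded_marking_stack_sketch.cc -- simplified analogue, not V8 code.
#include <cassert>
#include <cstdio>
#include <vector>

struct FakeObject { bool overflowed = false; };

class BoundedMarkingStack {
 public:
  explicit BoundedMarkingStack(size_t capacity) : capacity_(capacity) {}

  void Push(FakeObject* object) {
    if (stack_.size() >= capacity_) {
      object->overflowed = true;  // remembered for the rescan pass
      overflowed_ = true;
    } else {
      stack_.push_back(object);
    }
  }

  FakeObject* Pop() {
    assert(!stack_.empty());
    FakeObject* object = stack_.back();
    stack_.pop_back();
    return object;
  }

  bool overflowed() const { return overflowed_; }
  bool is_empty() const { return stack_.empty(); }

 private:
  size_t capacity_;
  bool overflowed_ = false;
  std::vector<FakeObject*> stack_;
};

int main() {
  FakeObject a, b, c;
  BoundedMarkingStack stack(2);
  stack.Push(&a);
  stack.Push(&b);
  stack.Push(&c);  // does not fit: c is flagged instead of being pushed
  assert(stack.overflowed() && c.overflowed);
  bool order_ok = (stack.Pop() == &b) && (stack.Pop() == &a);
  std::printf("pop order ok: %s, empty afterwards: %s\n",
              order_ok ? "yes" : "no", stack.is_empty() ? "yes" : "no");
  return 0;
}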
// -------------------------------------------------------------------------
// Mark-Compact collector
+
+class OverflowedObjectsScanner;
+
class MarkCompactCollector {
public:
// Type of functions to compute forwarding addresses of objects in
@@ -438,17 +134,12 @@ class MarkCompactCollector {
// Set the global force_compaction flag, it must be called before Prepare
// to take effect.
- inline void SetFlags(int flags);
-
- inline bool PreciseSweepingRequired() {
- return sweep_precisely_;
+ void SetForceCompaction(bool value) {
+ force_compaction_ = value;
}
- static void Initialize();
-
- void CollectEvacuationCandidates(PagedSpace* space);
- void AddEvacuationCandidate(Page* p);
+ static void Initialize();
// Prepares for GC by resetting relocation info in old and map spaces and
// choosing spaces to compact.
@@ -457,9 +148,23 @@ class MarkCompactCollector {
// Performs a global garbage collection.
void CollectGarbage();
- bool StartCompaction();
+ // True if the last full GC performed heap compaction.
+ bool HasCompacted() { return compacting_collection_; }
+
+ // True after the Prepare phase if the compaction is taking place.
+ bool IsCompacting() {
+#ifdef DEBUG
+ // For the purposes of asserts we don't want this to keep returning true
+ // after the collection is completed.
+ return state_ != IDLE && compacting_collection_;
+#else
+ return compacting_collection_;
+#endif
+ }
- void AbortCompaction();
+ // The count of the number of objects left marked at the end of the last
+ // completed full GC (expected to be zero).
+ int previous_marked_count() { return previous_marked_count_; }
// During a full GC, there is a stack-allocated GCTracer that is used for
// bookkeeping information. Return a pointer to that tracer.
@@ -474,99 +179,29 @@ class MarkCompactCollector {
// Determine type of object and emit deletion log event.
static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
+ // Returns size of a possibly marked object.
+ static int SizeOfMarkedObject(HeapObject* obj);
+
// Distinguishable invalid map encodings (for single word and multiple words)
// that indicate free regions.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
- static inline bool IsMarked(Object* obj);
-
inline Heap* heap() const { return heap_; }
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
void EnableCodeFlushing(bool enable);
- enum SweeperType {
- CONSERVATIVE,
- LAZY_CONSERVATIVE,
- PRECISE
- };
-
-#ifdef DEBUG
- void VerifyMarkbitsAreClean();
- static void VerifyMarkbitsAreClean(PagedSpace* space);
- static void VerifyMarkbitsAreClean(NewSpace* space);
-#endif
-
- // Sweep a single page from the given space conservatively.
- // Return a number of reclaimed bytes.
- static intptr_t SweepConservatively(PagedSpace* space, Page* p);
-
- INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
- return Page::FromAddress(reinterpret_cast<Address>(anchor))->
- ShouldSkipEvacuationSlotRecording();
- }
-
- INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
- return Page::FromAddress(reinterpret_cast<Address>(host))->
- ShouldSkipEvacuationSlotRecording();
- }
-
- INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
- return Page::FromAddress(reinterpret_cast<Address>(obj))->
- IsEvacuationCandidate();
- }
-
- void EvictEvacuationCandidate(Page* page) {
- if (FLAG_trace_fragmentation) {
- PrintF("Page %p is too popular. Disabling evacuation.\n",
- reinterpret_cast<void*>(page));
- }
-
- // TODO(gc) If all evacuation candidates are too popular we
- // should stop slots recording entirely.
- page->ClearEvacuationCandidate();
-
- // We were not collecting slots on this page that point
- // to other evacuation candidates thus we have to
- // rescan the page after evacuation to discover and update all
- // pointers to evacuated objects.
- if (page->owner()->identity() == OLD_DATA_SPACE) {
- evacuation_candidates_.RemoveElement(page);
- } else {
- page->SetFlag(Page::RESCAN_ON_EVACUATION);
- }
- }
-
- void RecordRelocSlot(RelocInfo* rinfo, Object* target);
- void RecordCodeEntrySlot(Address slot, Code* target);
-
- INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
-
- void MigrateObject(Address dst,
- Address src,
- int size,
- AllocationSpace to_old_space);
-
- bool TryPromoteObject(HeapObject* object, int object_size);
-
inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
inline void set_encountered_weak_maps(Object* weak_map) {
encountered_weak_maps_ = weak_map;
}
- void InvalidateCode(Code* code);
-
private:
MarkCompactCollector();
~MarkCompactCollector();
- bool MarkInvalidatedCode();
- void RemoveDeadInvalidatedCode();
- void ProcessInvalidatedCode(ObjectVisitor* visitor);
-
-
#ifdef DEBUG
enum CollectorState {
IDLE,
@@ -582,26 +217,23 @@ class MarkCompactCollector {
CollectorState state_;
#endif
- // Global flag that forces sweeping to be precise, so we can traverse the
- // heap.
- bool sweep_precisely_;
+ // Global flag that forces a compaction.
+ bool force_compaction_;
- // True if we are collecting slots to perform evacuation from evacuation
- // candidates.
- bool compacting_;
+ // Global flag indicating whether spaces were compacted on the last GC.
+ bool compacting_collection_;
- bool was_marked_incrementally_;
+ // Global flag indicating whether spaces will be compacted on the next GC.
+ bool compact_on_next_gc_;
- bool collect_maps_;
+ // The number of objects left marked at the end of the last completed full
+ // GC (expected to be zero).
+ int previous_marked_count_;
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
GCTracer* tracer_;
- SlotsBufferAllocator slots_buffer_allocator_;
-
- SlotsBuffer* migration_slots_buffer_;
-
// Finishes GC, performs heap verification if enabled.
void Finish();
@@ -626,13 +258,13 @@ class MarkCompactCollector {
// Marking operations for objects reachable from roots.
void MarkLiveObjects();
- void AfterMarking();
+ void MarkUnmarkedObject(HeapObject* obj);
- INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
-
- INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
+ inline void MarkObject(HeapObject* obj) {
+ if (!obj->IsMarked()) MarkUnmarkedObject(obj);
+ }
- void ProcessNewlyMarkedObject(HeapObject* obj);
+ inline void SetMark(HeapObject* obj);
// Creates back pointers for all map transitions, stores them in
// the prototype field. The original prototype pointers are restored
@@ -666,18 +298,18 @@ class MarkCompactCollector {
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
- void ProcessMarkingDeque();
+ void ProcessMarkingStack();
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
// overflow flag will be set.
- void EmptyMarkingDeque();
+ void EmptyMarkingStack();
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
// flag on the marking stack.
- void RefillMarkingDeque();
+ void RefillMarkingStack();
// After reachable maps have been marked process per context object
// literal map caches removing unmarked entries.
@@ -691,16 +323,17 @@ class MarkCompactCollector {
void UpdateLiveObjectCount(HeapObject* obj);
#endif
+ // We sweep the large object space in the same way whether we are
+ // compacting or not, because the large object space is never compacted.
+ void SweepLargeObjectSpace();
+
+ // Test whether a (possibly marked) object is a Map.
+ static inline bool SafeIsMap(HeapObject* object);
+
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
void ClearNonLiveTransitions();
- // Marking detaches initial maps from SharedFunctionInfo objects
- // to make this reference weak. We need to reattach initial maps
- // back after collection. This is either done during
- // ClearNonLiveTransitions pass or by calling this function.
- void ReattachInitialMaps();
-
// Mark all values associated with reachable keys in weak maps encountered
// so far. This might push new object or even new weak maps onto the
// marking stack.
@@ -713,30 +346,133 @@ class MarkCompactCollector {
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
- // a non-compacting collection.
+ // a non-compacting collection, or else computing and encoding
+ // forwarding addresses for a compacting collection.
//
// Before: Live objects are marked and non-live objects are unmarked.
//
- // After: Live objects are unmarked, non-live regions have been added to
- // their space's free list. Active eden semispace is compacted by
- // evacuation.
+ // After: (Non-compacting collection.) Live objects are unmarked,
+ // non-live regions have been added to their space's free
+ // list.
+ //
+ // After: (Compacting collection.) The forwarding address of live
+ // objects in the paged spaces is encoded in their map word
+ // along with their (non-forwarded) map pointer.
+ //
+ // The forwarding address of live objects in the new space is
+ // written to their map word's offset in the inactive
+ // semispace.
+ //
+ // Bookkeeping data is written to the page header of
+ // each paged-space page that contains live objects after
+ // compaction:
//
+ // The allocation watermark field is used to track the
+ // relocation top address, the address of the first word
+ // after the end of the last live object in the page after
+ // compaction.
+ //
+ // The Page::mc_page_index field contains the zero-based index of the
+ // page in its space. This word is only used for map space pages, in
+ // order to encode the map addresses in 21 bits to free 11
+ // bits per map word for the forwarding address.
+ //
+ // The Page::mc_first_forwarded field contains the (nonencoded)
+ // forwarding address of the first live object in the page.
+ //
+ // In both the new space and the paged spaces, a linked list
+ // of live regions is constructed (linked through
+ // pointers in the non-live region immediately following each
+ // live region) to speed further passes of the collector.
+
+ // Encodes forwarding addresses of objects in compactable parts of the
+ // heap.
+ void EncodeForwardingAddresses();
+
+ // Encodes the forwarding addresses of objects in new space.
+ void EncodeForwardingAddressesInNewSpace();
+
+ // Function template to encode the forwarding addresses of objects in
+ // paged spaces, parameterized by allocation and non-live processing
+ // functions.
+ template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
+ void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
+
+ // Iterates live objects in a space, passes live objects
+ // to a callback function which returns the heap size of the object.
+ // Returns the number of live objects iterated.
+ int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
+ int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
+
+ // Iterates the live objects between a range of addresses, returning the
+ // number of live objects.
+ int IterateLiveObjectsInRange(Address start, Address end,
+ LiveObjectCallback size_func);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
void SweepSpaces();
- void EvacuateNewSpace();
+ // -----------------------------------------------------------------------
+ // Phase 3: Updating pointers in live objects.
+ //
+ // Before: Same as after phase 2 (compacting collection).
+ //
+ // After: All pointers in live objects, including encoded map
+ // pointers, are updated to point to their target's new
+ // location.
+
+ friend class UpdatingVisitor; // helper for updating visited objects
- void EvacuateLiveObjectsFromPage(Page* p);
+ // Updates pointers in all spaces.
+ void UpdatePointers();
- void EvacuatePages();
+ // Updates pointers in an object in new space.
+ // Returns the heap size of the object.
+ int UpdatePointersInNewObject(HeapObject* obj);
- void EvacuateNewSpaceAndCandidates();
+ // Updates pointers in an object in old spaces.
+ // Returns the heap size of the object.
+ int UpdatePointersInOldObject(HeapObject* obj);
- void SweepSpace(PagedSpace* space, SweeperType sweeper);
+ // Calculates the forwarding address of an object in an old space.
+ static Address GetForwardingAddressInOldSpace(HeapObject* obj);
+ // -----------------------------------------------------------------------
+ // Phase 4: Relocating objects.
+ //
+ // Before: Pointers to live objects are updated to point to their
+ // target's new location.
+ //
+ // After: Objects have been moved to their new addresses.
+
+ // Relocates objects in all spaces.
+ void RelocateObjects();
+
+ // Converts a code object's inline targets to addresses; the conversion from
+ // address to target happens in the marking phase.
+ int ConvertCodeICTargetToAddress(HeapObject* obj);
+
+ // Relocate a map object.
+ int RelocateMapObject(HeapObject* obj);
+
+ // Relocates an old object.
+ int RelocateOldPointerObject(HeapObject* obj);
+ int RelocateOldDataObject(HeapObject* obj);
+
+ // Relocate a property cell object.
+ int RelocateCellObject(HeapObject* obj);
+
+ // Helper function.
+ inline int RelocateOldNonCodeObject(HeapObject* obj,
+ PagedSpace* space);
+
+ // Relocates an object in the code space.
+ int RelocateCodeObject(HeapObject* obj);
+
+ // Copy a new object.
+ int RelocateNewObject(HeapObject* obj);
#ifdef DEBUG
// -----------------------------------------------------------------------
@@ -776,19 +512,15 @@ class MarkCompactCollector {
#endif
Heap* heap_;
- MarkingDeque marking_deque_;
+ MarkingStack marking_stack_;
CodeFlusher* code_flusher_;
Object* encountered_weak_maps_;
- List<Page*> evacuation_candidates_;
- List<Code*> invalidated_code_;
-
friend class Heap;
+ friend class OverflowedObjectsScanner;
};
-const char* AllocationSpaceName(AllocationSpace space);
-
} } // namespace v8::internal
#endif // V8_MARK_COMPACT_H_
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index c70463d63b..b6ad5ac352 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -1,4 +1,5 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -80,11 +81,11 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
}
Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? Handle<Object>::cast(FACTORY->undefined_value())
+ ? FACTORY->undefined_value()
: Handle<Object>::cast(stack_trace);
Handle<Object> stack_frames_handle = stack_frames.is_null()
- ? Handle<Object>::cast(FACTORY->undefined_value())
+ ? FACTORY->undefined_value()
: Handle<Object>::cast(stack_frames);
Handle<JSMessageObject> message =
@@ -148,15 +149,12 @@ Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
JSFunction::cast(
Isolate::Current()->js_builtins_object()->
GetPropertyNoExceptionThrown(*fmt_str)));
- Handle<Object> argv[] = { data };
+ Object** argv[1] = { data.location() };
bool caught_exception;
Handle<Object> result =
Execution::TryCall(fun,
- Isolate::Current()->js_builtins_object(),
- ARRAY_SIZE(argv),
- argv,
- &caught_exception);
+ Isolate::Current()->js_builtins_object(), 1, argv, &caught_exception);
if (caught_exception || !result->IsString()) {
return FACTORY->LookupAsciiSymbol("<error>");
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 553c511c34..c4c4fd2590 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -78,6 +78,7 @@ bool Operand::is_reg() const {
}
+
// -----------------------------------------------------------------------------
// RelocInfo.
@@ -119,11 +120,6 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
- if (host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
}
@@ -153,10 +149,6 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
- if (host() != NULL && target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
}
@@ -188,12 +180,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
- if (host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
- }
}
@@ -214,11 +200,6 @@ void RelocInfo::set_call_address(Address target) {
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
Assembler::set_target_address_at(pc_, target);
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
}
@@ -261,7 +242,12 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ Object** p = target_object_address();
+ Object* orig = *p;
+ visitor->VisitPointer(p);
+ if (*p != orig) {
+ set_target_object(*p);
+ }
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
@@ -271,9 +257,9 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
@@ -287,7 +273,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
+ StaticVisitor::VisitPointer(heap, target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
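
Every block removed from assembler-mips-inl.h above is an incremental-marking write barrier: after storing a code target, embedded object, or property cell, the newer code notified the incremental marker of the new edge via RecordWrite/RecordWriteIntoCode on the host code object; 3.6.4 has no incremental marking, so the setters become plain stores. Below is a minimal, self-contained sketch of that store-plus-barrier shape with toy types; only the pattern mirrors the removed lines, and the marker body is deliberately empty because its real internals are not reproduced here.

struct HeapObject {};
struct Object : HeapObject {
  bool IsHeapObject() const { return true; }  // toy: always a heap object
};

struct IncrementalMarking {
  // Record that 'host' now references 'value' through 'slot', so an
  // in-progress incremental mark does not miss the freshly written edge.
  void RecordWrite(HeapObject* host, Object** slot, HeapObject* value) {
    (void)host; (void)slot; (void)value;  // real V8 would grey 'value' here
  }
};

inline void StoreWithBarrier(HeapObject* host, Object** slot, Object* value,
                             IncrementalMarking* marking) {
  *slot = value;  // the plain store; all that the 3.6.4 setters keep
  if (host != nullptr && value->IsHeapObject()) {
    marking->RecordWrite(host, slot, value);  // the barrier the downgrade drops
  }
}

int main() {
  Object host, value;
  Object* field = nullptr;
  IncrementalMarking marking;
  StoreWithBarrier(&host, &field, &value, &marking);
}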
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index e933181d41..e01a0ca70b 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -74,9 +74,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
void CpuFeatures::Probe() {
- unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
- CpuFeaturesImpliedByCompiler());
- ASSERT(supported_ == 0 || supported_ == standard_features);
+ ASSERT(!initialized_);
#ifdef DEBUG
initialized_ = true;
#endif
@@ -84,7 +82,8 @@ void CpuFeatures::Probe() {
// Get the features implied by the OS and the compiler settings. This is the
// minimal set of features which is also allowed for generated code in the
// snapshot.
- supported_ |= standard_features;
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ supported_ |= CpuFeaturesImpliedByCompiler();
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
@@ -2019,8 +2018,7 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(pc_, rmode, data); // We do not try to reuse pool constants.
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2043,7 +2041,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 5609d5ee4a..d77230448f 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -587,11 +587,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&convert_argument);
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(v0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- }
+ __ EnterInternalFrame();
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ LeaveInternalFrame();
__ pop(function);
__ mov(argument, v0);
__ Branch(&argument_is_string);
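
This hunk, and most of the builtins and code-stub hunks that follow, swap the newer RAII FrameScope (whose destructor emits the frame teardown, so every exit from the block leaves the frame) for 3.6.4's explicit EnterInternalFrame()/LeaveInternalFrame() pair. A toy sketch of the two styles; the Emit* functions are placeholders standing in for the MacroAssembler (an assumption, since the real calls generate MIPS code rather than printing).

#include <cstdio>

// Placeholders for the frame-emitting MacroAssembler calls.
void EmitEnterInternalFrame() { std::puts("enter internal frame"); }
void EmitLeaveInternalFrame() { std::puts("leave internal frame"); }

// Newer style (the removed lines): RAII scope, teardown in the destructor.
class FrameScope {
 public:
  FrameScope() { EmitEnterInternalFrame(); }
  ~FrameScope() { EmitLeaveInternalFrame(); }
};

void GenerateWithScope() {
  {
    FrameScope scope;
    std::puts("  ...generated body...");
  }  // teardown emitted here automatically when the scope closes
}

// 3.6.4 style (the restored lines): explicit, manually paired calls.
void GenerateExplicit() {
  EmitEnterInternalFrame();
  std::puts("  ...generated body...");
  EmitLeaveInternalFrame();  // must be kept in sync by hand
}

int main() {
  GenerateWithScope();
  GenerateExplicit();
}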
@@ -607,11 +606,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- }
+ __ EnterInternalFrame();
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ __ LeaveInternalFrame();
__ Ret();
}
@@ -624,13 +622,13 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- Label slow, non_function_call;
+ Label non_function_call;
// Check that the function is not a smi.
__ And(t0, a1, Operand(kSmiTagMask));
__ Branch(&non_function_call, eq, t0, Operand(zero_reg));
// Check that the function is a JSFunction.
__ GetObjectType(a1, a2, a2);
- __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
// Jump to the function-specific construct stub.
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -640,21 +638,13 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// a0: number of arguments
// a1: called object
- // a2: object type
- Label do_call;
- __ bind(&slow);
- __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
__ bind(&non_function_call);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
// CALL_NON_FUNCTION expects the non-function constructor as receiver
// (instead of the original receiver from the call site). The receiver is
// stack element argc.
// Set expected number of arguments to zero (not changing a0).
__ mov(a2, zero_reg);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -677,336 +667,331 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -----------------------------------
// Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
+ __ EnterConstructFrame();
- // Preserve the two incoming parameters on the stack.
- __ sll(a0, a0, kSmiTagSize); // Tag arguments count.
- __ MultiPushReversed(a0.bit() | a1.bit());
+ // Preserve the two incoming parameters on the stack.
+ __ sll(a0, a0, kSmiTagSize); // Tag arguments count.
+ __ MultiPushReversed(a0.bit() | a1.bit());
- // Use t7 to hold undefined, which is used in several places below.
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ // Use t7 to hold undefined, which is used in several places below.
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- Label rt_call, allocated;
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- if (FLAG_inline_new) {
- Label undo_allocation;
+ Label rt_call, allocated;
+ // Try to allocate the object without transitioning into C code. If any of the
+ // preconditions is not met, the code bails out to the runtime call.
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ li(a2, Operand(debug_step_in_fp));
- __ lw(a2, MemOperand(a2));
- __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ li(a2, Operand(debug_step_in_fp));
+ __ lw(a2, MemOperand(a2));
+ __ Branch(&rt_call, ne, a2, Operand(zero_reg));
#endif
- // Load the initial map and verify that it is in fact a map.
- // a1: constructor function
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
- __ Branch(&rt_call, eq, t0, Operand(zero_reg));
- __ GetObjectType(a2, a3, t4);
- __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+ // Load the initial map and verify that it is in fact a map.
+ // a1: constructor function
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Branch(&rt_call, eq, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t4);
+ __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
- __ lbu(t0, constructor_count);
- __ Subu(t0, t0, Operand(1));
- __ sb(t0, constructor_count);
- __ Branch(&allocate, ne, t0, Operand(zero_reg));
-
- __ Push(a1, a2);
-
- __ push(a1); // Constructor.
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(a2);
- __ pop(a1);
-
- __ bind(&allocate);
- }
+ // Check that the constructor is not constructing a JSFunction (see comments
+ // in Runtime_NewObject in runtime.cc). In which case the initial map's
+ // instance type would be JS_FUNCTION_TYPE.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+ __ lbu(t0, constructor_count);
+ __ Subu(t0, t0, Operand(1));
+ __ sb(t0, constructor_count);
+ __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+ __ Push(a1, a2);
+
+ __ push(a1); // Constructor.
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(a2);
+ __ pop(a1);
+
+ __ bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: object size
- // t4: JSObject (not tagged)
- __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t5, t4);
- __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
- __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
- __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
- __ Addu(t5, t5, Operand(3*kPointerSize));
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
- // Fill all the in-object properties with appropriate filler.
- // a1: constructor function
- // a2: initial map
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t5: First in-object property of JSObject (not tagged)
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t6, t4, t0); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ // Allocated the JSObject, now initialize the fields. Map is set to initial
+ // map and properties and elements are set to empty fixed array.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size
+ // t4: JSObject (not tagged)
+ __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t5, t4);
+ __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+ __ Addu(t5, t5, Operand(3*kPointerSize));
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+ // Fill all the in-object properties with appropriate filler.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t5: First in-object property of JSObject (not tagged)
+ __ sll(t0, a3, kPointerSizeLog2);
+ __ addu(t6, t4, t0); // End of object.
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ { Label loop, entry;
if (count_constructions) {
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ sll(t0, a0, kPointerSizeLog2);
- __ addu(a0, t5, t0);
- // a0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ Assert(le, "Unexpected number of pre-allocated property fields.",
- a0, Operand(t6));
- }
- __ InitializeFieldsWithFiller(t5, a0, t7);
// To allow for truncation.
__ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+ } else {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
}
- __ InitializeFieldsWithFiller(t5, t6, t7);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- __ Addu(t4, t4, Operand(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not fall through to runtime call if it is.
- // a1: constructor function
- // t4: JSObject
- // t5: start of next object (not tagged)
- __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields
- // and in-object properties.
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ Addu(a3, a3, Operand(t6));
- __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
- kBitsPerByte);
- __ subu(a3, a3, t6);
-
- // Done if no extra properties are to be allocated.
- __ Branch(&allocated, eq, a3, Operand(zero_reg));
- __ Assert(greater_equal, "Property allocation count failed.",
- a3, Operand(zero_reg));
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // a1: constructor
- // a3: number of elements in properties array
- // t4: JSObject
- // t5: start of next object
- __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
- a0,
- t5,
- t6,
- a2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // a1: constructor
- // a3: number of elements in properties array (un-tagged)
- // t4: JSObject
- // t5: start of next object
- __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
- __ mov(a2, t5);
- __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
- __ sll(a0, a3, kSmiTagSize);
- __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
- __ Addu(a2, a2, Operand(2 * kPointerSize));
-
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-
- // Initialize the fields to undefined.
- // a1: constructor
- // a2: First element of FixedArray (not tagged)
- // a3: number of elements in properties array
- // t4: JSObject
- // t5: FixedArray (not tagged)
- __ sll(t3, a3, kPointerSizeLog2);
- __ addu(t6, a2, t3); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
- }
- __ jmp(&entry);
- __ bind(&loop);
- __ sw(t7, MemOperand(a2));
- __ addiu(a2, a2, kPointerSize);
- __ bind(&entry);
- __ Branch(&loop, less, a2, Operand(t6));
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject.
- // a1: constructor function
- // t4: JSObject
- // t5: FixedArray (not tagged)
- __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
-
- // Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a4: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // t4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(t4, t5);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sw(t7, MemOperand(t5, 0));
+ __ addiu(t5, t5, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, Uless, t5, Operand(t6));
}
- __ bind(&rt_call);
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- __ push(a1); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(t4, v0);
+ // Add the object tag to make the JSObject real, so that we can continue and
+ // jump into the continuation code at any time from now on. Any failures
+ // need to undo the allocation, so that the heap is in a consistent state
+ // and verifiable.
+ __ Addu(t4, t4, Operand(kHeapObjectTag));
- // Receiver for constructor call allocated.
+ // Check if a non-empty properties array is needed. Continue with allocated
+ // object if not fall through to runtime call if it is.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: start of next object (not tagged)
+ __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ // The field instance sizes contains both pre-allocated property fields and
+ // in-object properties.
+ __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+ __ And(t6,
+ a0,
+ Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
+ __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
+ __ Addu(a3, a3, Operand(t0));
+ __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
+ __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
+ __ subu(a3, a3, t0);
+
+ // Done if no extra properties are to be allocated.
+ __ Branch(&allocated, eq, a3, Operand(zero_reg));
+ __ Assert(greater_equal, "Property allocation count failed.",
+ a3, Operand(zero_reg));
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // a1: constructor
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: start of next object
+ __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ AllocateInNewSpace(
+ a0,
+ t5,
+ t6,
+ a2,
+ &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // a1: constructor
+ // a3: number of elements in properties array (un-tagged)
// t4: JSObject
- __ bind(&allocated);
- __ push(t4);
+ // t5: start of next object
+ __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+ __ mov(a2, t5);
+ __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+ __ sll(a0, a3, kSmiTagSize);
+ __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+ __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+ // Initialize the fields to undefined.
+ // a1: constructor
+ // a2: First element of FixedArray (not tagged)
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ sll(t3, a3, kPointerSizeLog2);
+ __ addu(t6, a2, t3); // End of object.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+ __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sw(t7, MemOperand(a2));
+ __ addiu(a2, a2, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, less, a2, Operand(t6));
+ }
- // Push the function and the allocated receiver from the stack.
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, kPointerSize));
- __ MultiPushReversed(a1.bit() | t4.bit());
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
- // Reload the number of arguments from the stack.
+ // Continue with JSObject being successfully allocated.
// a1: constructor function
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ lw(a3, MemOperand(sp, 4 * kPointerSize));
+  // t4: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // t4: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(t4, t5);
+ }
- // Setup pointer to last argument.
- __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ bind(&rt_call);
+ // Allocate the new receiver object using the runtime call.
+ // a1: constructor function
+ __ push(a1); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(t4, v0);
- // Setup number of arguments for function call below.
- __ srl(a0, a3, kSmiTagSize);
+ // Receiver for constructor call allocated.
+ // t4: JSObject
+ __ bind(&allocated);
+ __ push(t4);
- // Copy arguments and receiver to the expression stack.
- // a0: number of arguments
- // a1: constructor function
- // a2: address of last argument (caller sp)
- // a3: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- Label loop, entry;
- __ jmp(&entry);
- __ bind(&loop);
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
- __ lw(t1, MemOperand(t0));
- __ push(t1);
- __ bind(&entry);
- __ Addu(a3, a3, Operand(-2));
- __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+ // Push the function and the allocated receiver from the stack.
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, kPointerSize));
+ __ MultiPushReversed(a1.bit() | t4.bit());
- // Call the function.
- // a0: number of arguments
- // a1: constructor function
- if (is_api_function) {
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Reload the number of arguments from the stack.
+ // a1: constructor function
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ lw(a3, MemOperand(sp, 4 * kPointerSize));
- // Pop the function from the stack.
- // v0: result
- // sp[0]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ Pop();
+ // Setup pointer to last argument.
+ __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Setup number of arguments for function call below.
+ __ srl(a0, a3, kSmiTagSize);
+
+ // Copy arguments and receiver to the expression stack.
+ // a0: number of arguments
+ // a1: constructor function
+ // a2: address of last argument (caller sp)
+ // a3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t1, MemOperand(t0));
+ __ push(t1);
+ __ bind(&entry);
+ __ Addu(a3, a3, Operand(-2));
+ __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
- // Restore context from the frame.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ And(t0, v0, Operand(kSmiTagMask));
- __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a3, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ lw(v0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, 2 * kPointerSize));
-
- // Leave construct frame.
+ // Call the function.
+ // a0: number of arguments
+ // a1: constructor function
+ if (is_api_function) {
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
+ // Pop the function from the stack.
+ // v0: result
+ // sp[0]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ Pop();
+
+ // Restore context from the frame.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ And(t0, v0, Operand(kSmiTagMask));
+ __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, a3, a3);
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ lw(v0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+ __ LeaveConstructFrame();
__ sll(t0, a1, kPointerSizeLog2 - 1);
__ Addu(sp, sp, t0);
__ Addu(sp, sp, kPointerSize);
@@ -1046,61 +1031,59 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(cp, zero_reg);
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Set up the context from the function argument.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Set up the roots register.
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
- __ li(s6, Operand(roots_address));
+ // Set up the context from the function argument.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // Push the function and the receiver onto the stack.
- __ Push(a1, a2);
+ // Set up the roots register.
+ ExternalReference roots_address =
+ ExternalReference::roots_address(masm->isolate());
+ __ li(s6, Operand(roots_address));
- // Copy arguments to the stack in a loop.
- // a3: argc
- // s0: argv, ie points to first arg
- Label loop, entry;
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t2, s0, t0);
- __ b(&entry);
- __ nop(); // Branch delay slot nop.
- // t2 points past last arg.
- __ bind(&loop);
- __ lw(t0, MemOperand(s0)); // Read next parameter.
- __ addiu(s0, s0, kPointerSize);
- __ lw(t0, MemOperand(t0)); // Dereference handle.
- __ push(t0); // Push parameter.
- __ bind(&entry);
- __ Branch(&loop, ne, s0, Operand(t2));
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ mov(s1, t0);
- __ mov(s2, t0);
- __ mov(s3, t0);
- __ mov(s4, t0);
- __ mov(s5, t0);
- // s6 holds the root address. Do not clobber.
- // s7 is cp. Do not init.
-
- // Invoke the code and pass argc as a0.
- __ mov(a0, a3);
- if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall());
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Push the function and the receiver onto the stack.
+ __ Push(a1, a2);
- // Leave internal frame.
+ // Copy arguments to the stack in a loop.
+ // a3: argc
+ // s0: argv, ie points to first arg
+ Label loop, entry;
+ __ sll(t0, a3, kPointerSizeLog2);
+ __ addu(t2, s0, t0);
+ __ b(&entry);
+ __ nop(); // Branch delay slot nop.
+ // t2 points past last arg.
+ __ bind(&loop);
+ __ lw(t0, MemOperand(s0)); // Read next parameter.
+ __ addiu(s0, s0, kPointerSize);
+ __ lw(t0, MemOperand(t0)); // Dereference handle.
+ __ push(t0); // Push parameter.
+ __ bind(&entry);
+ __ Branch(&loop, ne, s0, Operand(t2));
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ mov(s1, t0);
+ __ mov(s2, t0);
+ __ mov(s3, t0);
+ __ mov(s4, t0);
+ __ mov(s5, t0);
+ // s6 holds the root address. Do not clobber.
+ // s7 is cp. Do not init.
+
+ // Invoke the code and pass argc as a0.
+ __ mov(a0, a3);
+ if (is_construct) {
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
+ __ LeaveInternalFrame();
+
__ Jump(ra);
}
@@ -1117,28 +1100,27 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- // Call the runtime function.
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ // Call the runtime function.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
- // Tear down temporary frame.
- }
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ Jump(t9);
@@ -1147,27 +1129,26 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
+ // Calculate the entry point.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
- // Tear down temporary frame.
- }
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ Jump(t9);
@@ -1209,20 +1190,19 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack, check
// if it is a function.
// a0: actual number of arguments
- Label slow, non_function;
+ Label non_function;
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ lw(a1, MemOperand(at));
__ And(at, a1, Operand(kSmiTagMask));
__ Branch(&non_function, eq, at, Operand(zero_reg));
__ GetObjectType(a1, a2, a2);
- __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
// 3a. Patch the first argument if necessary when calling a function.
// a0: actual number of arguments
// a1: function
Label shift_arguments;
- __ li(t0, Operand(0, RelocInfo::NONE)); // Indicate regular JS_FUNCTION.
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -1230,13 +1210,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Do not transform the receiver for strict mode functions.
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
- __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
+ __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
// Do not transform the receiver for native (Compilerhints already in a3).
- __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
+ __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
// Compute the receiver in non-strict mode.
// Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
@@ -1258,25 +1238,21 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
__ bind(&convert_to_object);
- // Enter an internal frame in order to preserve argument count.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ sll(a0, a0, kSmiTagSize); // Smi tagged.
- __ push(a0);
-
- __ push(a2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(a2, v0);
-
- __ pop(a0);
- __ sra(a0, a0, kSmiTagSize); // Un-tag.
- // Leave internal frame.
- }
- // Restore the function to a1, and the flag to t0.
+ __ EnterInternalFrame(); // In order to preserve argument count.
+ __ sll(a0, a0, kSmiTagSize); // Smi tagged.
+ __ push(a0);
+
+ __ push(a2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a2, v0);
+
+ __ pop(a0);
+ __ sra(a0, a0, kSmiTagSize); // Un-tag.
+ __ LeaveInternalFrame();
+ // Restore the function to a1.
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ lw(a1, MemOperand(at));
- __ li(t0, Operand(0, RelocInfo::NONE));
__ Branch(&patch_receiver);
// Use the global receiver object from the called function as the
@@ -1297,31 +1273,25 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Branch(&shift_arguments);
}
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ li(t0, Operand(1, RelocInfo::NONE)); // Indicate function proxy.
- __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
-
- __ bind(&non_function);
- __ li(t0, Operand(2, RelocInfo::NONE)); // Indicate non-function.
-
- // 3c. Patch the first argument when calling a non-function. The
+ // 3b. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
// a0: actual number of arguments
// a1: function
- // t0: call type (0: JS function, 1: function proxy, 2: non-function)
+ __ bind(&non_function);
+ // Restore the function in case it has been modified.
__ sll(at, a0, kPointerSizeLog2);
__ addu(a2, sp, at);
__ sw(a1, MemOperand(a2, -kPointerSize));
+ // Clear a1 to indicate a non-function being called.
+ __ mov(a1, zero_reg);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
// a0: actual number of arguments
// a1: function
- // t0: call type (0: JS function, 1: function proxy, 2: non-function)
__ bind(&shift_arguments);
{ Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
@@ -1339,26 +1309,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Pop();
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
// a0: actual number of arguments
// a1: function
- // t0: call type (0: JS function, 1: function proxy, 2: non-function)
- { Label function, non_proxy;
- __ Branch(&function, eq, t0, Operand(zero_reg));
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ mov(a2, zero_reg);
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ Branch(&non_proxy, ne, t0, Operand(1));
-
- __ push(a1); // Re-add proxy object as additional argument.
- __ Addu(a0, a0, Operand(1));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
+ { Label function;
+ __ Branch(&function, ne, a1, Operand(zero_reg));
+ __ mov(a2, zero_reg); // expected arguments is 0 for CALL_NON_FUNCTION
__ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ __ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
@@ -1392,161 +1350,134 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kRecvOffset = 3 * kPointerSize;
const int kFunctionOffset = 4 * kPointerSize;
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
- __ push(a0);
- __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
- __ push(a0);
- // Returns (in v0) number of arguments to copy to stack as Smi.
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
- // Make a2 the space we have left. The stack might already be overflowed
- // here which will cause a2 to become negative.
- __ subu(a2, sp, a2);
- // Check if the arguments will overflow the stack.
- __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Branch(&okay, gt, a2, Operand(t3)); // Signed comparison.
-
- // Out of stack space.
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ push(a1);
- __ push(v0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
-
- // Push current limit and index.
- __ bind(&okay);
- __ push(v0); // Limit.
- __ mov(a1, zero_reg); // Initial index.
- __ push(a1);
-
- // Get the receiver.
- __ lw(a0, MemOperand(fp, kRecvOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ GetObjectType(a1, a2, a2);
- __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- // Change context eagerly to get the right global object if necessary.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in a1.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
-
- // Do not transform the receiver for native (Compilerhints already in a2).
- __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
-
- // Compute the receiver in non-strict mode.
- __ And(t3, a0, Operand(kSmiTagMask));
- __ Branch(&call_to_object, eq, t3, Operand(zero_reg));
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ Branch(&use_global_receiver, eq, a0, Operand(a1));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Branch(&use_global_receiver, eq, a0, Operand(a2));
-
- // Check if the receiver is already a JavaScript object.
- // a0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Convert the receiver to a regular object.
- // a0: receiver
- __ bind(&call_to_object);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
- __ Branch(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
- __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- // a0: receiver
- __ bind(&push_receiver);
- __ push(a0);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ lw(a0, MemOperand(fp, kIndexOffset));
- __ Branch(&entry);
-
- // Load the current argument from the arguments array and push it to the
- // stack.
- // a0: current argument index
- __ bind(&loop);
- __ lw(a1, MemOperand(fp, kArgsOffset));
- __ push(a1);
- __ push(a0);
-
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(v0);
-
- // Use inline caching to access the arguments.
- __ lw(a0, MemOperand(fp, kIndexOffset));
- __ Addu(a0, a0, Operand(1 << kSmiTagSize));
- __ sw(a0, MemOperand(fp, kIndexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ lw(a1, MemOperand(fp, kLimitOffset));
- __ Branch(&loop, ne, a0, Operand(a1));
-
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(a0);
- __ sra(a0, a0, kSmiTagSize);
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ GetObjectType(a1, a2, a2);
- __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ EnterInternalFrame();
+
+ __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
+ __ push(a0);
+ __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
+ __ push(a0);
+ // Returns (in v0) number of arguments to copy to stack as Smi.
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+  // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ // Make a2 the space we have left. The stack might already be overflowed
+ // here which will cause a2 to become negative.
+ __ subu(a2, sp, a2);
+ // Check if the arguments will overflow the stack.
+ __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Branch(&okay, gt, a2, Operand(t0)); // Signed comparison.
+
+ // Out of stack space.
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ push(a1);
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
+
+ // Push current limit and index.
+ __ bind(&okay);
+ __ push(v0); // Limit.
+ __ mov(a1, zero_reg); // Initial index.
+ __ push(a1);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ lw(a0, MemOperand(fp, kFunctionOffset));
+ __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in a0.
+ __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute the receiver.
+ Label call_to_object, use_global_receiver, push_receiver;
+ __ lw(a0, MemOperand(fp, kRecvOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+ // Do not transform the receiver for native (Compilerhints already in a2).
+ __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+ // Compute the receiver in non-strict mode.
+ __ And(t0, a0, Operand(kSmiTagMask));
+ __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+
+ // Check if the receiver is already a JavaScript object.
+ // a0: receiver
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Convert the receiver to a regular object.
+ // a0: receiver
+ __ bind(&call_to_object);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
+ __ Branch(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ // a0: receiver
+ __ bind(&push_receiver);
+ __ push(a0);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Branch(&entry);
- scope.GenerateLeaveFrame();
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // a0: current argument index
+ __ bind(&loop);
+ __ lw(a1, MemOperand(fp, kArgsOffset));
+ __ push(a1);
+ __ push(a0);
- __ Ret(USE_DELAY_SLOT);
- __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(v0);
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(a1); // Add function proxy as last argument.
- __ Addu(a0, a0, Operand(1));
- __ li(a2, Operand(0, RelocInfo::NONE));
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- // Tear down the internal frame and remove function, receiver and args.
- }
+ // Use inline caching to access the arguments.
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+ __ sw(a0, MemOperand(fp, kIndexOffset));
- __ Ret(USE_DELAY_SLOT);
- __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ lw(a1, MemOperand(fp, kLimitOffset));
+ __ Branch(&loop, ne, a0, Operand(a1));
+ // Invoke the function.
+ ParameterCount actual(a0);
+ __ sra(a0, a0, kSmiTagSize);
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+
+ // Tear down the internal frame and remove function, receiver and args.
+ __ LeaveInternalFrame();
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
}
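
The builtins-mips.cc changes above also strip JS function-proxy support: the newer Generate_FunctionCall and Generate_JSConstructCall kept a call-type flag in t0 (0: JS function, 1: function proxy, 2: non-function) and routed proxies through CALL_FUNCTION_PROXY, whereas the restored code only distinguishes function from non-function and signals the latter by clearing a1. A toy sketch of the two dispatch shapes; the enum and the print statements are illustrative assumptions, and only the builtin names come from the diff itself.

#include <cstdio>

enum CallType { kJSFunction = 0, kFunctionProxy = 1, kNonFunction = 2 };

// Shape of the removed three-way dispatch (call type carried in t0).
void DispatchNewer(CallType type) {
  switch (type) {
    case kJSFunction:    std::puts("invoke the function directly");   break;
    case kFunctionProxy: std::puts("tail-call CALL_FUNCTION_PROXY");  break;
    case kNonFunction:   std::puts("tail-call CALL_NON_FUNCTION");    break;
  }
}

// Shape of the restored two-way dispatch (a1 cleared means non-function).
void DispatchOld(bool is_js_function) {
  if (is_js_function) {
    std::puts("invoke the function directly");
  } else {
    std::puts("tail-call CALL_NON_FUNCTION");
  }
}

int main() {
  DispatchNewer(kFunctionProxy);
  DispatchOld(false);
}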
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index fe251b9e6f..521b8e58f0 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -190,71 +190,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- v0, a1, a2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ lw(a3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
- // Setup the object header.
- __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ li(a2, Operand(Smi::FromInt(length)));
- __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the global context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(a3, &after_sentinel);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ Assert(eq, message, a3, Operand(zero_reg));
- }
- __ lw(a3, GlobalObjectOperand());
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
- __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Setup the fixed slots.
- __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
- __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
- __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
-
- // Copy the global object from the previous context.
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, v0);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
// [sp]: constant elements.
@@ -680,7 +615,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register object,
Destination destination,
- DoubleRegister double_dst,
+ FPURegister double_dst,
Register dst1,
Register dst2,
Register heap_number_map,
@@ -716,16 +651,25 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
// Load the double value.
__ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- double_dst,
- scratch1,
- except_flag,
- kCheckForInexactConversion);
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(single_scratch, double_dst);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion or exception.
+ __ And(scratch2, scratch2, kFCSRFlagMask);
// Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
if (destination == kCoreRegisters) {
__ Move(dst1, dst2, double_dst);
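
The lines added above are the manual replacement for the removed EmitFPUTruncate helper: save FCSR with cfc1, clear it with ctc1 to disable FPU exceptions, truncate with trunc_w_d, re-read FCSR, mask it with kFCSRFlagMask to see whether the conversion was inexact or invalid, then restore FCSR and branch to not_int32 on failure. Below is a portable C++ analogue of the same "did truncation to int32 lose information?" check. It is not MIPS code; because the C++ standard does not promise that a double-to-int cast raises FE_INEXACT, the exactness test is a round-trip comparison, and feholdexcept/fesetenv only mirror the save/restore shape of the FCSR handling (stated assumptions).

#include <cfenv>
#include <cstdint>

// Returns false (the "not_int32" path) when truncating 'value' toward zero
// to int32 would lose information or fall outside the int32 range.
bool TruncateToInt32Exact(double value, int32_t* out) {
  std::fenv_t saved;
  std::feholdexcept(&saved);  // analogue of cfc1/ctc1(zero_reg): save, mask
  // Any double strictly between -2^31 - 1 and 2^31 truncates into int32 range.
  bool in_range = value > -2147483649.0 && value < 2147483648.0;
  if (!in_range) {
    std::fesetenv(&saved);    // analogue of restoring FCSR
    return false;
  }
  *out = static_cast<int32_t>(value);               // truncation toward zero
  bool exact = static_cast<double>(*out) == value;  // no fractional part lost
  std::fesetenv(&saved);      // analogue of restoring FCSR
  return exact;
}

int main() {
  int32_t v;
  bool ok_int  = TruncateToInt32Exact(3.0, &v);    // true, v == 3
  bool ok_frac = TruncateToInt32Exact(3.5, &v);    // false: fraction lost
  bool ok_big  = TruncateToInt32Exact(4.0e9, &v);  // false: outside int32
  return (ok_int && !ok_frac && !ok_big) ? 0 : 1;
}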
@@ -762,7 +706,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- DoubleRegister double_scratch,
+ FPURegister double_scratch,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -791,19 +735,27 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
// Load the double value.
__ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
- FPURegister single_scratch = double_scratch.low();
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- double_scratch,
- scratch1,
- except_flag,
- kCheckForInexactConversion);
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(double_scratch, double_scratch);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion or exception.
+ __ And(scratch2, scratch2, kFCSRFlagMask);
// Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
// Get the result in the destination register.
- __ mfc1(dst, single_scratch);
+ __ mfc1(dst, double_scratch);
} else {
// Load the double value in the destination registers.
@@ -929,11 +881,9 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ Move(f12, a0, a1);
__ Move(f14, a2, a3);
}
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
+ // Call C routine that may not cause GC or other trouble.
+ __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+ 4);
// Store answer in the overwritable heap number.
if (!IsMipsSoftFloatABI) {
CpuFeatures::Scope scope(FPU);
@@ -951,35 +901,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(a1) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a2) &&
- sign_.is(a3)) {
- return true;
- }
- if (the_int_.is(a2) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a3) &&
- sign_.is(a0)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
- WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
- WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
- stub1.GetCode()->set_is_pregenerated(true);
- stub2.GetCode()->set_is_pregenerated(true);
-}
-
-
// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -1337,7 +1258,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
if (!CpuFeatures::IsSupported(FPU)) {
__ push(ra);
- __ PrepareCallCFunction(0, 2, t4);
+ __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments.
if (!IsMipsSoftFloatABI) {
// We are not using MIPS FPU instructions, and parameters for the runtime
    // function call are prepared in a0-a3 registers, but the function we are
@@ -1347,17 +1268,19 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
__ Move(f12, a0, a1);
__ Move(f14, a2, a3);
}
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
- 0, 2);
+ __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
__ pop(ra); // Because this function returns int, result is in v0.
__ Ret();
} else {
CpuFeatures::Scope scope(FPU);
Label equal, less_than;
- __ BranchF(&equal, NULL, eq, f12, f14);
- __ BranchF(&less_than, NULL, lt, f12, f14);
+ __ c(EQ, D, f12, f14);
+ __ bc1t(&equal);
+ __ nop();
+
+ __ c(OLT, D, f12, f14);
+ __ bc1t(&less_than);
+ __ nop();
// Not equal, not less, not NaN, must be greater.
__ li(v0, Operand(GREATER));
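
PrepareCallCFunction(4, t4) above reserves four integer argument slots because, under the o32 soft-float convention assumed here, each double travels as two 32-bit words (a0/a1 for the first operand, a2/a3 for the second) and is only moved into f12/f14 when FPU instructions are available. A sketch of that splitting, assuming the low word sits in the lower-numbered register; the struct and helpers are stand-ins, not V8 declarations:

#include <cstdint>
#include <cstring>

// One double occupies a pair of general-purpose argument registers.
struct DoubleWords { uint32_t lo; uint32_t hi; };  // e.g. a0/a1 or a2/a3.

static DoubleWords SplitDouble(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return { static_cast<uint32_t>(bits & 0xffffffffu),
           static_cast<uint32_t>(bits >> 32) };
}

static double JoinDouble(DoubleWords w) {
  uint64_t bits = (static_cast<uint64_t>(w.hi) << 32) | w.lo;
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}
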
@@ -1380,7 +1303,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into a2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
@@ -1550,7 +1473,9 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ JumpIfSmi(probe, not_found);
__ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
__ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+ __ c(EQ, D, f12, f14);
+ __ bc1t(&load_result_from_cache);
+ __ nop(); // bc1t() requires explicit fill of branch delay slot.
__ Branch(not_found);
} else {
// Note that there is no cache check for non-FPU case, even though
@@ -1666,7 +1591,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ li(t2, Operand(EQUAL));
// Check if either rhs or lhs is NaN.
- __ BranchF(NULL, &nan, eq, f12, f14);
+ __ c(UN, D, f12, f14);
+ __ bc1t(&nan);
+ __ nop();
// Check if LESS condition is satisfied. If true, move conditionally
// result to v0.
@@ -1784,144 +1711,88 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
-// The stub expects its argument in the tos_ register and returns its result in
-// it, too: zero for false, and a non-zero value for true.
+// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses FPU instructions.
CpuFeatures::Scope scope(FPU);
- Label patch;
- const Register map = t5.is(tos_) ? t3 : t5;
-
- // undefined -> false.
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value.
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- __ And(at, tos_, kSmiTagMask);
- // tos_ contains the correct return value already
- __ Ret(eq, at, Operand(zero_reg));
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(tos_, &patch);
- }
-
- if (types_.NeedsMap()) {
- __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, at, Operand(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- __ movn(tos_, zero_reg, at);
- __ Ret(ne, at, Operand(zero_reg));
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // Spec object -> true.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- // tos_ contains the correct non-zero return value already.
- __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- Label skip;
- __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
- __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- __ Ret(); // the string length is OK as the return value
- __ bind(&skip);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // Heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&not_heap_number, ne, map, Operand(at));
- Label zero_or_nan, number;
- __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ bind(&zero_or_nan);
- __ mov(tos_, zero_reg);
- __ bind(&number);
- __ Ret();
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- __ LoadRoot(at, value);
- __ Subu(at, at, tos_); // This is a check for equality for the movz below.
- // The value of a root is never NULL, so we can avoid loading a non-null
- // value into tos_ when we want to return 'true'.
- if (!result) {
- __ movz(tos_, zero_reg, at);
- }
- __ Ret(eq, at, Operand(zero_reg));
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ Move(a3, tos_);
- __ li(a2, Operand(Smi::FromInt(tos_.code())));
- __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
- __ Push(a3, a2, a1);
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ MultiPush(kJSCallerSaved | ra.bit());
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
- __ MultiPushFPU(kCallerSavedFPU);
- }
- const int argument_count = 1;
- const int fp_argument_count = 0;
- const Register scratch = a1;
+ Label false_result;
+ Label not_heap_number;
+ Register scratch0 = t5.is(tos_) ? t3 : t5;
+
+ // undefined -> false
+ __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+ // Boolean -> its value
+ __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
+ __ Branch(&false_result, eq, tos_, Operand(scratch0));
+ __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+ // return true if the equal condition is satisfied.
+ __ Ret(eq, tos_, Operand(scratch0));
+
+ // Smis: 0 -> false, all other -> true
+ __ And(scratch0, tos_, tos_);
+ __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
+ __ And(scratch0, tos_, Operand(kSmiTagMask));
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+ // return true if the not equal condition is satisfied.
+ __ Ret(eq, scratch0, Operand(zero_reg));
+
+ // 'null' -> false
+ __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
+ __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+ // HeapNumber => false if +0, -0, or NaN.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&not_heap_number, ne, scratch0, Operand(at));
+
+ __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+ __ fcmp(f12, 0.0, UEQ);
+
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ movt(tos_, zero_reg);
+ __ Ret();
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ li(a0, Operand(ExternalReference::isolate_address()));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
- __ MultiPopFPU(kCallerSavedFPU);
- }
+ __ bind(&not_heap_number);
+
+ // It can be an undetectable object.
+ // Undetectable => false.
+ __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
+ __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
+
+ // JavaScript object => true.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Check for string.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
+
+ // String value => false iff empty, i.e., length is zero.
+ __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // If length is zero, "tos_" contains zero ==> false.
+ // If length is not zero, "tos_" contains a non-zero value ==> true.
+ __ Ret();
- __ MultiPop(kJSCallerSaved | ra.bit());
+ // Return 0 in "tos_" for false.
+ __ bind(&false_result);
+ __ mov(tos_, zero_reg);
__ Ret();
}
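
Spelled out in plain C++, the branch ladder above encodes the following decision order; JSValue is a hypothetical tagged-value stand-in used only to make that order explicit:

#include <cmath>
#include <string>

struct JSValue {
  enum Kind { kUndefined, kNull, kBoolean, kSmi, kHeapNumber,
              kString, kUndetectable, kObject } kind;
  bool boolean_value = false;
  int smi_value = 0;
  double number_value = 0.0;
  std::string string_value;
};

static bool ToBoolean(const JSValue& v) {
  switch (v.kind) {
    case JSValue::kUndefined:    return false;            // undefined -> false
    case JSValue::kBoolean:      return v.boolean_value;  // its own value
    case JSValue::kSmi:          return v.smi_value != 0; // Smi 0 -> false
    case JSValue::kNull:         return false;            // null -> false
    case JSValue::kHeapNumber:   // false for +0, -0 and NaN
      return v.number_value != 0.0 && !std::isnan(v.number_value);
    case JSValue::kUndetectable: return false;            // undetectable -> false
    case JSValue::kObject:       return true;             // JS object -> true
    case JSValue::kString:       return !v.string_value.empty();
  }
  return false;
}
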
@@ -2080,13 +1951,12 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a1, v0);
- __ pop(a0);
- }
+ __ EnterInternalFrame();
+ __ push(a0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(a1, v0);
+ __ pop(a0);
+ __ LeaveInternalFrame();
__ bind(&heapnumber_allocated);
__ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
@@ -2128,14 +1998,13 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(v0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a2, v0); // Move the new heap number into a2.
- // Get the heap number into v0, now that the new heap number is in a2.
- __ pop(v0);
- }
+ __ EnterInternalFrame();
+ __ push(v0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(a2, v0); // Move the new heap number into a2.
+ // Get the heap number into v0, now that the new heap number is in a2.
+ __ pop(v0);
+ __ LeaveInternalFrame();
// Convert the heap number in v0 to an untagged integer in a1.
// This can't go slow-case because it's the same number we already
@@ -2246,9 +2115,6 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -2851,16 +2717,26 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Otherwise return a heap number if allowed, or jump to type
// transition.
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- f10,
- scratch1,
- except_flag);
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(single_scratch, f10);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion or exception.
+ __ And(scratch2, scratch2, kFCSRFlagMask);
if (result_type_ <= BinaryOpIC::INT32) {
- // If except_flag != 0, result does not fit in a 32-bit integer.
- __ Branch(&transition, ne, except_flag, Operand(zero_reg));
+ // If scratch2 != 0, result does not fit in a 32-bit integer.
+ __ Branch(&transition, ne, scratch2, Operand(zero_reg));
}
// Check if the result fits in a smi.
@@ -3053,9 +2929,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ Ret();
} else {
// Tail call that writes the int32 in a2 to the heap number in v0, using
- // a3 and a0 as scratch. v0 is preserved and returned.
+ // a3 and a1 as scratch. v0 is preserved and returned.
__ mov(a0, t1);
- WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
+ WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
__ TailCallStub(&stub);
}
@@ -3349,6 +3225,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ lw(t0, MemOperand(cache_entry, 0));
__ lw(t1, MemOperand(cache_entry, 4));
__ lw(t2, MemOperand(cache_entry, 8));
+ __ Addu(cache_entry, cache_entry, 12);
__ Branch(&calculate, ne, a2, Operand(t0));
__ Branch(&calculate, ne, a3, Operand(t1));
// Cache hit. Load result, cleanup and return.
@@ -3382,13 +3259,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Register a0 holds precalculated cache entry address; preserve
// it on the stack and pop it into register cache_entry after the
// call.
- __ Push(cache_entry, a2, a3);
+ __ push(cache_entry);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(f4);
// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
- __ Pop(cache_entry, a2, a3);
+ __ pop(cache_entry);
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
__ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
@@ -3406,11 +3283,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
__ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
+ __ EnterInternalFrame();
+ __ push(a0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
__ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
@@ -3423,15 +3299,14 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// We return the value in f4 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ li(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
+ __ EnterInternalFrame();
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ li(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
__ Ret();
}
}
@@ -3442,26 +3317,22 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
__ push(ra);
__ PrepareCallCFunction(2, scratch);
if (IsMipsSoftFloatABI) {
- __ Move(a0, a1, f4);
+ __ Move(v0, v1, f4);
} else {
__ mov_d(f12, f4);
}
- AllowExternalCallThatCantCauseGC scope(masm);
switch (type_) {
case TranscendentalCache::SIN:
__ CallCFunction(
- ExternalReference::math_sin_double_function(masm->isolate()),
- 0, 1);
+ ExternalReference::math_sin_double_function(masm->isolate()), 2);
break;
case TranscendentalCache::COS:
__ CallCFunction(
- ExternalReference::math_cos_double_function(masm->isolate()),
- 0, 1);
+ ExternalReference::math_cos_double_function(masm->isolate()), 2);
break;
case TranscendentalCache::LOG:
__ CallCFunction(
- ExternalReference::math_log_double_function(masm->isolate()),
- 0, 1);
+ ExternalReference::math_log_double_function(masm->isolate()), 2);
break;
default:
UNIMPLEMENTED();
@@ -3544,15 +3415,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
heapnumbermap,
&call_runtime);
__ push(ra);
- __ PrepareCallCFunction(1, 1, scratch);
+ __ PrepareCallCFunction(3, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
- __ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
- }
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()), 3);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(v0, heapnumber);
@@ -3575,20 +3443,15 @@ void MathPowStub::Generate(MacroAssembler* masm) {
heapnumbermap,
&call_runtime);
__ push(ra);
- __ PrepareCallCFunction(0, 2, scratch);
+ __ PrepareCallCFunction(4, scratch);
// ABI (o32) for func(double a, double b): a in f12, b in f14.
ASSERT(double_base.is(f12));
ASSERT(double_exponent.is(f14));
__ SetCallCDoubleArguments(double_base, double_exponent);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0,
- 2);
- __ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
- }
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()), 4);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(v0, heapnumber);
@@ -3605,37 +3468,6 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
-}
-
-
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
-}
-
-
-void CEntryStub::GenerateAheadOfTime() {
- CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
- code->set_is_pregenerated(true);
-}
-
-
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(v0);
}
@@ -3658,17 +3490,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// s1: pointer to the first argument (C callee-saved)
// s2: pointer to builtin function (C callee-saved)
- Isolate* isolate = masm->isolate();
-
if (do_gc) {
// Move result passed in v0 into a0 to call PerformGC.
__ mov(a0, v0);
- __ PrepareCallCFunction(1, 0, a1);
- __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
+ __ PrepareCallCFunction(1, a1);
+ __ CallCFunction(
+ ExternalReference::perform_gc_function(masm->isolate()), 1);
}
ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(isolate);
+ ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
if (always_allocate) {
__ li(a0, Operand(scope_depth));
__ lw(a1, MemOperand(a0));
@@ -3757,16 +3588,18 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
// Retrieve the pending exception and clear the variable.
- __ li(a3, Operand(isolate->factory()->the_hole_value()));
+ __ li(t0,
+ Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(a3, MemOperand(t0));
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ masm->isolate())));
__ lw(v0, MemOperand(t0));
__ sw(a3, MemOperand(t0));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
__ Branch(throw_termination_exception, eq,
- v0, Operand(isolate->factory()->termination_exception()));
+ v0, Operand(masm->isolate()->factory()->termination_exception()));
// Handle normal exception.
__ jmp(throw_normal_exception);
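
The exception path above is a take-and-clear of the isolate's pending-exception slot: the hole sentinel is fetched, the pending exception is read into v0, and the hole is stored back so the slot reads as empty afterwards. The same protocol in C-like form, with stand-in types:

// Stand-in forward declaration, for illustration only.
struct Object;

static Object* TakePendingException(Object** pending_slot, Object* the_hole) {
  Object* exception = *pending_slot;  // lw v0, MemOperand(t0)
  *pending_slot = the_hole;           // sw a3, MemOperand(t0)
  return exception;
}
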
@@ -3795,7 +3628,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Subu(s1, s1, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Setup argc and the builtin function in callee-saved registers.
@@ -3849,7 +3681,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
- Isolate* isolate = masm->isolate();
// Registers:
// a0: entry address
@@ -3868,11 +3699,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
CpuFeatures::Scope scope(FPU);
// Save callee-saved FPU registers.
__ MultiPushFPU(kCalleeSavedFPU);
- // Set up the reserved register for 0.0.
- __ Move(kDoubleRegZero, 0.0);
}
-
// Load argv in s0 register.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
if (CpuFeatures::IsSupported(FPU)) {
@@ -3887,7 +3715,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ li(t2, Operand(Smi::FromInt(marker)));
__ li(t1, Operand(Smi::FromInt(marker)));
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
- isolate)));
+ masm->isolate())));
__ lw(t0, MemOperand(t0));
__ Push(t3, t2, t1, t0);
// Setup frame pointer for the frame to be pushed.
@@ -3911,7 +3739,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
+ masm->isolate());
__ li(t1, Operand(ExternalReference(js_entry_sp)));
__ lw(t2, MemOperand(t1));
__ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
@@ -3934,7 +3763,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Coming in here the fp will be invalid because the PushTryHandler below
// sets it to 0 to signal the existence of the JSEntry frame.
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ masm->isolate())));
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
__ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit); // b exposes branch delay slot.
@@ -3949,9 +3778,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ li(t1, Operand(isolate->factory()->the_hole_value()));
+ __ li(t0,
+ Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(t1, MemOperand(t0));
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ masm->isolate())));
__ sw(t1, MemOperand(t0));
// Invoke the function by calling through JS entry trampoline builtin.
@@ -3974,7 +3805,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
if (is_construct) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
+ masm->isolate());
__ li(t0, Operand(construct_entry));
} else {
ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
@@ -4002,7 +3833,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore the top frame descriptors from the stack.
__ pop(t1);
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
- isolate)));
+ masm->isolate())));
__ sw(t1, MemOperand(t0));
// Reset the stack to the callee saved registers.
@@ -4026,10 +3857,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// * object: a0 or at sp + 1 * kPointerSize.
// * function: a1 or at sp.
//
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register t0.
+// Inlined call site patching is a crankshaft-specific feature that is not
+// implemented on MIPS.
void InstanceofStub::Generate(MacroAssembler* masm) {
+ // This is a crankshaft-specific feature that has not been implemented yet.
+ ASSERT(!HasCallSiteInlineCheck());
// Call site inlining and patching implies arguments in registers.
ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
// ReturnTrueFalse is only implemented for inlined call sites.
@@ -4043,8 +3875,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
const Register inline_site = t5;
const Register scratch = a2;
- const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
-
Label slow, loop, is_instance, is_not_instance, not_js_object;
if (!HasArgsInRegisters()) {
@@ -4060,10 +3890,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// real lookup and update the call site cache.
if (!HasCallSiteInlineCheck()) {
Label miss;
- __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
- __ Branch(&miss, ne, function, Operand(at));
- __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
- __ Branch(&miss, ne, map, Operand(at));
+ __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
+ __ Branch(&miss, ne, function, Operand(t1));
+ __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
+ __ Branch(&miss, ne, map, Operand(t1));
__ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
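
The three roots referenced above act as a one-entry cache for instanceof: the (function, map) pair is the key and the previously computed answer is the value. A sketch of the lookup with stand-in types (not V8 declarations):

// Stand-in types for illustration only.
struct InstanceofCache {
  const void* function = nullptr;  // Heap::kInstanceofCacheFunctionRootIndex
  const void* map = nullptr;       // Heap::kInstanceofCacheMapRootIndex
  bool answer = false;             // Heap::kInstanceofCacheAnswerRootIndex
};

// Returns true on a hit and leaves the cached answer in *answer_out; on a
// miss the caller falls through to the real prototype-chain walk and then
// refreshes the cache.
static bool LookupInstanceofCache(const InstanceofCache& cache,
                                  const void* function, const void* map,
                                  bool* answer_out) {
  if (cache.function != function || cache.map != map) return false;
  *answer_out = cache.answer;
  return true;
}
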
@@ -4083,15 +3913,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
} else {
- ASSERT(HasArgsInRegisters());
- // Patch the (relocated) inlined map check.
-
- // The offset was stored in t0 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ LoadFromSafepointRegisterSlot(scratch, t0);
- __ Subu(inline_site, ra, scratch);
- // Patch the relocated value to map.
- __ PatchRelocatedValue(inline_site, scratch, map);
+ UNIMPLEMENTED_MIPS();
}
// Register mapping: a3 is object map and t0 is function prototype.
@@ -4117,16 +3939,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(v0, zero_reg);
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
} else {
- // Patch the call site to return true.
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ PatchRelocatedValue(inline_site, scratch, v0);
-
- if (!ReturnTrueFalseObject()) {
- ASSERT_EQ(Smi::FromInt(0), 0);
- __ mov(v0, zero_reg);
- }
+ UNIMPLEMENTED_MIPS();
}
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
@@ -4135,17 +3948,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ li(v0, Operand(Smi::FromInt(1)));
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
} else {
- // Patch the call site to return false.
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ PatchRelocatedValue(inline_site, scratch, v0);
-
- if (!ReturnTrueFalseObject()) {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
+ UNIMPLEMENTED_MIPS();
}
-
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
Label object_not_null, object_not_null_or_smi;
@@ -4182,11 +3986,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
+ __ EnterInternalFrame();
+ __ Push(a0, a1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ __ LeaveInternalFrame();
__ mov(a0, v0);
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
__ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
@@ -4608,6 +4411,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
@@ -4620,8 +4427,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static const int kSubjectOffset = 2 * kPointerSize;
static const int kJSRegExpOffset = 3 * kPointerSize;
- Isolate* isolate = masm->isolate();
-
Label runtime, invoke_regexp;
// Allocation of registers for this function. These are in callee save
@@ -4637,9 +4442,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(
- isolate);
+ masm->isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
__ li(a0, Operand(address_of_regexp_stack_memory_size));
__ lw(a0, MemOperand(a0, 0));
__ Branch(&runtime, eq, a0, Operand(zero_reg));
@@ -4720,7 +4525,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
FieldMemOperand(a0, JSArray::kElementsOffset));
__ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ Branch(&runtime, ne, a0, Operand(
- isolate->factory()->fixed_array_map()));
+ masm->isolate()->factory()->fixed_array_map()));
// Check that the last match info has space for the capture registers and the
// additional information.
__ lw(a0,
@@ -4811,7 +4616,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(),
+ __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
@@ -4851,12 +4656,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 5: static offsets vector buffer.
__ li(a0, Operand(
- ExternalReference::address_of_static_offsets_vector(isolate)));
+ ExternalReference::address_of_static_offsets_vector(masm->isolate())));
__ sw(a0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data
// and calculate the shift of the index (0 for ASCII and 1 for two byte).
- __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4909,9 +4715,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ li(a1, Operand(isolate->factory()->the_hole_value()));
+ __ li(a1, Operand(
+ ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(a1, MemOperand(a1, 0));
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ masm->isolate())));
__ lw(v0, MemOperand(a2, 0));
__ Branch(&runtime, eq, v0, Operand(a1));
@@ -4929,7 +4737,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure and exception return null.
- __ li(v0, Operand(isolate->factory()->null_value()));
+ __ li(v0, Operand(masm->isolate()->factory()->null_value()));
__ Addu(sp, sp, Operand(4 * kPointerSize));
__ Ret();
@@ -4949,29 +4757,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ sw(a2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
+ __ mov(a3, last_match_info_elements); // Moved up to reduce latency.
__ sw(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ mov(a2, subject);
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset,
- a2,
- t3,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
__ sw(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastInputOffset,
- subject,
- t3,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ mov(a3, last_match_info_elements);
+ __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
+ ExternalReference::address_of_static_offsets_vector(masm->isolate());
__ li(a2, Operand(address_of_static_offsets_vector));
// a1: number of capture registers
@@ -5096,24 +4895,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void CallFunctionStub::FinishCode(Code* code) {
- code->set_has_function_cache(false);
-}
-
-
-void CallFunctionStub::Clear(Heap* heap, Address address) {
- UNREACHABLE();
-}
-
-
-Object* CallFunctionStub::GetCachedValue(Address address) {
- UNREACHABLE();
- return NULL;
-}
-
-
void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow, non_function;
+ Label slow;
// The receiver might implicitly be the global object. This is
// indicated by passing the hole as the receiver to the call
@@ -5139,7 +4922,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Check that the function is really a JavaScript function.
// a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
+ __ JumpIfSmi(a1, &slow);
// Get the map of the function object.
__ GetObjectType(a1, a2, a2);
__ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -5167,22 +4950,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Slow-case: Non-function called.
__ bind(&slow);
- // Check for function proxy.
- __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
- __ push(a1); // Put proxy as additional argument.
- __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
- __ li(a2, Operand(0, RelocInfo::NONE));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(t1, CALL_AS_FUNCTION);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
- __ bind(&non_function);
__ sw(a1, MemOperand(sp, argc_ * kPointerSize));
__ li(a0, Operand(argc_)); // Setup the number of arguments.
__ mov(a2, zero_reg);
@@ -5288,27 +5057,24 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ Branch(&call_runtime_, ne, result_, Operand(t0));
// Get the first of the two strings and load its instance type.
- __ lw(result_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
__ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
__ addu(scratch_, scratch_, result_);
- __ lw(result_, FieldMemOperand(object_, SlicedString::kParentOffset));
+ __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Assure that we are dealing with a sequential string. Go to runtime if not.
__ bind(&assure_seq_string);
- __ lw(result_, FieldMemOperand(result_, HeapObject::kMapOffset));
+ __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// Check that parent is not an external string. Go to runtime otherwise.
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t0, result_, Operand(kStringRepresentationMask));
__ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
- // Actually fetch the parent string if it is confirmed to be sequential.
- STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset);
- __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
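
The unpacking above follows one level of indirection before requiring a sequential string: a cons string is replaced by its first component, and a sliced string contributes its start offset and is replaced by its parent. A stand-in sketch of that step (types are illustrative, not V8's string hierarchy):

struct StringRef {
  enum Shape { kSequential, kCons, kSliced } shape;
  StringRef* first;    // cons: first component
  StringRef* parent;   // sliced: underlying string
  int offset;          // sliced: start offset into parent
};

// Mirrors the assure_seq_string path: follow one level of indirection and
// accumulate the slice offset; anything still non-sequential after this
// goes to the runtime.
static StringRef* UnpackOneLevel(StringRef* str, int* index) {
  if (str->shape == StringRef::kCons) return str->first;
  if (str->shape == StringRef::kSliced) {
    *index += str->offset;
    return str->parent;
  }
  return str;  // already sequential
}
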
@@ -6697,25 +6463,39 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ Subu(a2, a0, Operand(kHeapObjectTag));
__ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
- // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
- Label fpu_eq, fpu_lt;
- // Test if equal, and also handle the unordered/NaN case.
- __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
+ Label fpu_eq, fpu_lt, fpu_gt;
+ // Compare operands (test if unordered).
+ __ c(UN, D, f0, f2);
+ // Don't base result on status bits when a NaN is involved.
+ __ bc1t(&unordered);
+ __ nop();
- // Test if less (unordered case is already handled).
- __ BranchF(&fpu_lt, NULL, lt, f0, f2);
+ // Test if equal.
+ __ c(EQ, D, f0, f2);
+ __ bc1t(&fpu_eq);
+ __ nop();
- // Otherwise it's greater, so just fall thru, and return.
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(GREATER)); // In delay slot.
+ // Test if unordered or less (unordered case is already handled).
+ __ c(ULT, D, f0, f2);
+ __ bc1t(&fpu_lt);
+ __ nop();
+ // Otherwise it's greater.
+ __ bc1f(&fpu_gt);
+ __ nop();
+
+ // Return a result of -1, 0, or 1.
__ bind(&fpu_eq);
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(EQUAL)); // In delay slot.
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
__ bind(&fpu_lt);
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(LESS)); // In delay slot.
+ __ li(v0, Operand(LESS));
+ __ Ret();
+
+ __ bind(&fpu_gt);
+ __ li(v0, Operand(GREATER));
+ __ Ret();
__ bind(&unordered);
}
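
The c()/bc1t() ladder above produces a three-way result, testing the unordered case first because no ordered condition holds when either operand is NaN. The same logic in portable C++; the enum values are illustrative rather than the V8 constants:

#include <cmath>

enum ThreeWayResult { kLess = -1, kEqual = 0, kGreater = 1, kUnordered = 2 };

static ThreeWayResult CompareHeapNumbers(double lhs, double rhs) {
  if (std::isnan(lhs) || std::isnan(rhs)) return kUnordered;  // c(UN, D, f0, f2)
  if (lhs == rhs) return kEqual;                              // c(EQ, D, f0, f2)
  if (lhs < rhs) return kLess;                                // c(ULT, D, f0, f2)
  return kGreater;                                            // remaining case
}
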
@@ -6866,13 +6646,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
masm->isolate());
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a0);
- __ li(t0, Operand(Smi::FromInt(op_)));
- __ push(t0);
- __ CallExternalReference(miss, 3);
- }
+ __ EnterInternalFrame();
+ __ Push(a1, a0);
+ __ li(t0, Operand(Smi::FromInt(op_)));
+ __ push(t0);
+ __ CallExternalReference(miss, 3);
+ __ LeaveInternalFrame();
// Compute the entry point of the rewritten stub.
__ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -7088,8 +6867,6 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
// Registers:
// result: StringDictionary to probe
// a1: key
@@ -7183,269 +6960,6 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { s2, s0, t3, EMIT_REMEMBERED_SET },
- { s2, a2, t3, EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { a3, t0, t1, EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { t0, a1, a2, OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { a1, a2, a3, EMIT_REMEMBERED_SET },
- { a3, a2, a1, EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { a2, a1, a3, EMIT_REMEMBERED_SET },
- { a3, a1, a2, EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { t0, a2, a3, EMIT_REMEMBERED_SET },
- // Null termination.
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
-};
-
-
-bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-bool StoreBufferOverflowStub::IsPregenerated() {
- return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- }
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two branch+nop instructions are generated with labels so as to
- // get the offset fixed up correctly by the bind(Label*) call. We patch it
- // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
- // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
- // incremental heap marking.
- // See RecordWriteStub::Patch for details.
- __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
- __ nop();
- __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
- __ nop();
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
-
- PatchBranchIntoNop(masm, 0);
- PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
-}
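
The patching the comments above describe only ever rewrites the opcode field of the first and third instruction words: with both operands fixed to zero_reg, bne is never taken (effectively a nop) and beq is always taken, while the 16-bit branch offset is left untouched. A standalone sketch of the bit manipulation, assuming the standard MIPS32 I-type encoding (opcode in bits 31..26, beq = 0x04, bne = 0x05):

#include <cstdint>

static const int      kOpcodeShift = 26;
static const uint32_t kImm16Mask   = 0xffff;
static const uint32_t kBeqOpcode   = 0x04u << kOpcodeShift;  // always taken here
static const uint32_t kBneOpcode   = 0x05u << kOpcodeShift;  // never taken here

// Both rs and rt are zero_reg (register 0), so those fields stay 0.
static uint32_t BranchIntoNop(uint32_t instr) {
  return kBneOpcode | (instr & kImm16Mask);
}

static uint32_t NopIntoBranch(uint32_t instr) {
  return kBeqOpcode | (instr & kImm16Mask);
}
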
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address =
- a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(a0));
- __ Move(address, regs_.address());
- __ Move(a0, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ Move(a1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ lw(a1, MemOperand(address, 0));
- }
- __ li(a2, Operand(ExternalReference::isolate_address()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label on_black;
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-
- // Get the value from the slot.
- __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index ef6b88908e..aa224bcfa6 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -59,25 +59,6 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -343,15 +324,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
: the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch),
- sign_(scratch2) {
- ASSERT(IntRegisterBits::is_valid(the_int_.code()));
- ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
- ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
- ASSERT(SignRegisterBits::is_valid(sign_.code()));
- }
-
- bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ sign_(scratch2) { }
private:
Register the_int_;
@@ -363,15 +336,13 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
- class SignRegisterBits: public BitField<int, 12, 4> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code())
- | SignRegisterBits::encode(sign_.code());
+ | ScratchRegisterBits::encode(scratch_.code());
}
void Generate(MacroAssembler* masm);
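
MinorKey above packs the stub's register codes into disjoint bit ranges so that every distinct register assignment yields a distinct key. A minimal stand-in for the BitField template, shown only to make the packing explicit; the register code values used in the example are arbitrary:

#include <cstdint>

template <typename T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

typedef BitFieldSketch<int, 0, 4> IntRegisterBitsSketch;
typedef BitFieldSketch<int, 4, 4> HeapNumberRegisterBitsSketch;
typedef BitFieldSketch<int, 8, 4> ScratchRegisterBitsSketch;

// Example: three register codes pack into one 12-bit key and can be
// recovered independently via decode().
static const uint32_t kExampleMinorKey =
    IntRegisterBitsSketch::encode(6) |
    HeapNumberRegisterBitsSketch::encode(2) |
    ScratchRegisterBitsSketch::encode(7);
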
@@ -404,215 +375,6 @@ class NumberToStringStub: public CodeStub {
};
-class RecordWriteStub: public CodeStub {
- public:
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
- const unsigned offset = masm->instr_at(pos) & kImm16Mask;
- masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
- (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
- ASSERT(Assembler::IsBne(masm->instr_at(pos)));
- }
-
- static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
- const unsigned offset = masm->instr_at(pos) & kImm16Mask;
- masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
- (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
- ASSERT(Assembler::IsBeq(masm->instr_at(pos)));
- }
-
- static Mode GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- 2 * Assembler::kInstrSize);
-
- if (Assembler::IsBeq(first_instruction)) {
- return INCREMENTAL;
- }
-
- ASSERT(Assembler::IsBne(first_instruction));
-
- if (Assembler::IsBeq(second_instruction)) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(Assembler::IsBne(second_instruction));
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
- break;
- }
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize);
- }
-
- private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_(object),
- address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
- }
-
- void Save(MacroAssembler* masm) {
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->push(scratch1_);
- }
-
- void Restore(MacroAssembler* masm) {
- masm->pop(scratch1_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
- masm->MultiPushFPU(kCallerSavedFPU);
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(FPU);
- masm->MultiPopFPU(kCallerSavedFPU);
- }
- masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
-
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
-
- bool MustBeInStubCache() {
- // All stubs must be registered in the stub cache
- // otherwise IncrementalMarker would not be able to find
- // and patch it.
- return true;
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 5> {};
- class ValueBits: public BitField<int, 5, 5> {};
- class AddressBits: public BitField<int, 10, 5> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- Label slow_;
- RegisterAllocation regs_;
-};
-
-
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM and MIPS.
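
The RecordWriteStub removed above keeps its current mode in the first two probed instructions of the generated stub: two never-taken bne zero_reg, zero_reg branches (effective nops) mean STORE_BUFFER_ONLY, an always-taken beq in the first slot means INCREMENTAL, and a bne followed by a beq two words later means INCREMENTAL_COMPACTION; Patch() rewrites the opcodes in place and flushes the icache. A minimal sketch of the decode step, using plain booleans in place of Assembler::IsBeq/IsBne (illustrative only, not the V8 API):

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    // first_is_beq / second_is_beq stand in for Assembler::IsBeq() applied to
    // the stub's first instruction and to the one two words after it.
    Mode DecodeMode(bool first_is_beq, bool second_is_beq) {
      if (first_is_beq) return INCREMENTAL;              // beq, ...  -> incremental
      if (second_is_beq) return INCREMENTAL_COMPACTION;  // bne, beq  -> compaction
      return STORE_BUFFER_ONLY;                          // bne, bne  -> store buffer only
    }
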
@@ -816,8 +578,6 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -830,7 +590,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryLookup; }
+ Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
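
Both MinorKey() implementations in this header pack their parameters into one integer with BitField<type, shift, width> helpers: three 5-bit register codes plus two 1-bit flags in the removed RecordWriteStub, and a single mode bit here. A stand-alone sketch of that packing scheme (my own minimal BitField, not V8's class):

    #include <cstdint>

    template <int kShift, int kWidth>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kWidth) - 1u) << kShift;
      static constexpr uint32_t encode(uint32_t value) { return (value << kShift) & kMask; }
      static constexpr uint32_t decode(uint32_t packed) { return (packed & kMask) >> kShift; }
    };

    using ObjectBits  = BitField<0, 5>;   // register code of the object
    using ValueBits   = BitField<5, 5>;   // register code of the value
    using AddressBits = BitField<10, 5>;  // register code of the slot address
    using ActionBits  = BitField<15, 1>;  // RememberedSetAction
    using FPModeBits  = BitField<16, 1>;  // SaveFPRegsMode

    constexpr uint32_t key =
        ObjectBits::encode(2) | ValueBits::encode(4) | AddressBits::encode(6) |
        ActionBits::encode(1) | FPModeBits::encode(0);
    static_assert(ValueBits::decode(key) == 4, "fields round-trip");
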
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index ff146dd4ed..4400b643ad 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -38,16 +38,12 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
+ masm->EnterInternalFrame();
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
+ masm->LeaveInternalFrame();
}
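
The downgrade trades the explicit EnterFrame/LeaveFrame calls plus has_frame() bookkeeping for the older EnterInternalFrame()/LeaveInternalFrame() pair. The newer scheme exists so the paired calls can be wrapped in a scope object that cannot be left unbalanced; a small RAII sketch of that idea (illustrative only, not V8's FrameScope):

    struct Masm {                  // stand-in for MacroAssembler
      bool has_frame = false;
      void EnterInternalFrame() { has_frame = true;  /* emit frame setup */ }
      void LeaveInternalFrame() { has_frame = false; /* emit frame teardown */ }
    };

    class InternalFrameScope {
     public:
      explicit InternalFrameScope(Masm* masm) : masm_(masm) { masm_->EnterInternalFrame(); }
      ~InternalFrameScope() { masm_->LeaveInternalFrame(); }
     private:
      Masm* masm_;
    };
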
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index b020d80575..a8de9c8610 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -71,6 +71,21 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
+ // Constants related to patching of inlined load/store.
+ static int GetInlinedKeyedLoadInstructionsAfterPatch() {
+ // This is in correlation with the padding in MacroAssembler::Abort.
+ return FLAG_debug_code ? 45 : 20;
+ }
+
+ static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
+
+ static int GetInlinedNamedStoreInstructionsAfterPatch() {
+ ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
+ // Magic number 5: instruction count after patched map load:
+ // li: 2 (liu & ori), Branch : 2 (bne & nop), sw : 1
+ return Isolate::Current()->inlined_write_barrier_size() + 5;
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
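
GetInlinedNamedStoreInstructionsAfterPatch above derives the patchable-site length from the inlined write barrier size plus five fixed instructions: a two-instruction li (lui and ori), a two-instruction branch (bne plus its delay-slot nop), and one sw. The same arithmetic with the constants spelled out (hypothetical helper, not the V8 function):

    // Number of MIPS instructions following the patched map load.
    int InlinedNamedStoreLength(int inlined_write_barrier_size) {
      const int kLiInstructions     = 2;  // lui + ori
      const int kBranchInstructions = 2;  // bne + delay-slot nop
      const int kStoreInstructions  = 1;  // sw
      return inlined_write_barrier_size +
             kLiInstructions + kBranchInstructions + kStoreInstructions;
    }
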
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 5b3ae89db0..e323c505e4 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -124,59 +124,56 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ And(at, reg, 0xc0000000);
- __ Assert(
- eq, "Unable to encode value as smi", at, Operand(zero_reg));
- }
- __ sll(reg, reg, kSmiTagSize);
+ __ EnterInternalFrame();
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non object values
+ // are stored as a smi causing it to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ And(at, reg, 0xc0000000);
+ __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
}
+ __ sll(reg, reg, kSmiTagSize);
}
- __ MultiPush(object_regs | non_object_regs);
}
+ __ MultiPush(object_regs | non_object_regs);
+ }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(a0, zero_reg); // No arguments.
- __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ MultiPop(object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ srl(reg, reg, kSmiTagSize);
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ li(reg, kDebugZapValue);
- }
+ __ mov(a0, zero_reg); // No arguments.
+ __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ MultiPop(object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ srl(reg, reg, kSmiTagSize);
+ }
+ if (FLAG_debug_code &&
+ (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+ __ li(reg, kDebugZapValue);
}
}
-
- // Leave the internal frame.
}
+ __ LeaveInternalFrame();
+
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
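
Generate_DebugBreakCallHelper pushes live registers across the runtime call; raw integer values are first shifted left by kSmiTagSize so the GC sees a valid smi and leaves them untouched, then shifted back afterwards, and the 0xc0000000 assert verifies the value survives the shift. A host-side sketch of that tag/untag round trip under the 32-bit, one-bit-tag smi layout used here:

    #include <cassert>
    #include <cstdint>

    const uint32_t kSmiTagSize = 1;

    uint32_t TagAsSmi(uint32_t value) {
      assert((value & 0xc0000000u) == 0);  // must be encodable as a smi
      return value << kSmiTagSize;         // low bit 0 == smi tag
    }

    uint32_t UntagSmi(uint32_t tagged) {
      return tagged >> kSmiTagSize;
    }
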
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 280b8cb549..18b6231999 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -53,8 +53,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index a2ebce6829..2c838938b7 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -85,20 +85,6 @@ static const RegList kCalleeSavedFPU =
1 << 30; // f30
static const int kNumCalleeSavedFPU = 6;
-
-static const RegList kCallerSavedFPU =
- 1 << 0 | // f0
- 1 << 2 | // f2
- 1 << 4 | // f4
- 1 << 6 | // f6
- 1 << 8 | // f8
- 1 << 10 | // f10
- 1 << 12 | // f12
- 1 << 14 | // f14
- 1 << 16 | // f16
- 1 << 18; // f18
-
-
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
static const int kNumSafepointRegisters = 24;
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index b3f0540872..9a210c49ea 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -47,7 +47,6 @@
#include "stub-cache.h"
#include "mips/code-stubs-mips.h"
-#include "mips/macro-assembler-mips.h"
namespace v8 {
namespace internal {
@@ -63,11 +62,9 @@ static unsigned GetPropertyId(Property* property) {
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
-// marker is a andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy
-// (raw 16 bit immediate value is used) is the delta from the pc to the first
+// marker is a andi at, rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16
+// bit immediate value is used) is the delta from the pc to the first
// instruction of the patchable code.
-// The marker instruction is effectively a NOP (dest is zero_reg) and will
-// never be emitted by normal code.
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -106,7 +103,7 @@ class JumpPatchSite BASE_EMBEDDED {
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
- __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
+ __ andi(at, reg, delta_to_patch_site % kImm16Mask);
#ifdef DEBUG
info_emitted_ = true;
#endif
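
EmitPatchInfo records where the patchable code starts by emitting one andi marker: the register field carries delta / kImm16Mask and the 16-bit immediate carries delta % kImm16Mask, so rx * 0xffff + yyy reconstructs the delta as the comment above describes. The encode/decode arithmetic on its own (plain integers, not the Assembler API):

    const int kImm16Mask = 0xffff;

    struct PatchInfo { int reg_code; int imm16; };

    PatchInfo EncodeDelta(int delta_to_patch_site) {
      return { delta_to_patch_site / kImm16Mask, delta_to_patch_site % kImm16Mask };
    }

    int DecodeDelta(const PatchInfo& info) {
      return info.reg_code * kImm16Mask + info.imm16;
    }
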
@@ -165,11 +162,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
int locals_count = info->scope()->num_stack_slots();
__ Push(ra, fp, cp, a1);
@@ -215,12 +207,14 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Load parameter from stack.
__ lw(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
- __ sw(a0, target);
-
- // Update the write barrier.
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ __ li(a1, Operand(Context::SlotOffset(var->index())));
+ __ addu(a2, cp, a1);
+ __ sw(a0, MemOperand(a2, 0));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use two more registers to avoid
+ // clobbering cp.
+ __ mov(a2, cp);
+ __ RecordWrite(a2, a1, a3);
}
}
}
@@ -278,7 +272,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), CONST, NULL, &ignored);
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -316,25 +310,17 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
- // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
- // to make sure it is constant. Branch may emit a skip-or-jump sequence
- // instead of the normal Branch. It seems that the "skip" part of that
- // sequence is about as long as this Branch would be so it is safe to ignore
- // that.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ sltu(at, sp, t0);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9, ... first, so it is safe to use the delay slot.
+ __ Branch(&ok, hs, sp, Operand(t0));
StackCheckStub stub;
- __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
+ __ CallStub(&stub);
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
@@ -684,12 +670,10 @@ void FullCodeGenerator::SetVar(Variable* var,
__ sw(src, location);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
- __ RecordWriteContextSlot(scratch0,
- location.offset(),
- src,
- scratch1,
- kRAHasBeenSaved,
- kDontSaveFPRegs);
+ __ RecordWrite(scratch0,
+ Operand(Context::SlotOffset(var->index())),
+ scratch1,
+ src);
}
}
@@ -721,7 +705,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- VariableMode mode,
+ Variable::Mode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
@@ -739,7 +723,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ sw(result_register(), StackOperand(variable));
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ sw(t0, StackOperand(variable));
@@ -766,16 +750,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ sw(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(cp,
- offset,
- result_register(),
- a2,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ mov(a1, cp);
+ __ RecordWrite(a1, Operand(offset), a2, result_register());
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ sw(at, ContextOperand(cp, variable->index()));
@@ -788,8 +766,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
__ li(a2, Operand(variable->name()));
// Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == VAR || mode == CONST || mode == LET);
- PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE;
+ ASSERT(mode == Variable::VAR ||
+ mode == Variable::CONST ||
+ mode == Variable::LET);
+ PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
__ li(a1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -799,7 +779,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ Push(cp, a2, a1);
// Push initial value for function declaration.
VisitForStackValue(function);
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
__ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
__ Push(cp, a2, a1, a0);
} else {
@@ -1221,25 +1201,17 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == DYNAMIC_GLOBAL) {
+ if (var->mode() == Variable::DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ Branch(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
+ } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == LET) {
+ if (local->mode() == Variable::CONST) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (local->mode() == CONST) {
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
- } else { // LET
- __ Branch(done, ne, at, Operand(zero_reg));
- __ li(a0, Operand(var->name()));
- __ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
}
__ Branch(done);
}
@@ -1272,14 +1244,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (var->mode() != LET && var->mode() != CONST) {
+ if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
context()->Plug(var);
} else {
// Let and const need a read barrier.
GetVar(v0, var);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == LET) {
+ if (var->mode() == Variable::LET) {
Label done;
__ Branch(&done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
@@ -1519,23 +1491,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
- __ lw(t6, MemOperand(sp)); // Copy of array literal.
- __ lw(a1, FieldMemOperand(t6, JSObject::kElementsOffset));
+ __ lw(a1, MemOperand(sp)); // Copy of array literal.
+ __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ sw(result_register(), FieldMemOperand(a1, offset));
- Label no_map_change;
- __ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store with v0 as the scratch
// register.
- __ RecordWriteField(
- a1, offset, result_register(), a2, kRAHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ CheckFastSmiOnlyElements(a3, a2, &no_map_change);
- __ push(t6); // Copy of array literal.
- __ CallRuntime(Runtime::kNonSmiElementStored, 1);
- __ bind(&no_map_change);
+ __ RecordWrite(a1, Operand(offset), a2, result_register());
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1887,7 +1850,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(v0); // Value.
@@ -1912,12 +1875,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(a3, result_register());
int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(a1, Operand(offset), a2, a3);
}
}
- } else if (var->mode() != CONST) {
+ } else if (var->mode() != Variable::CONST) {
// Assignment to var or initializing assignment to let.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, a1);
@@ -1931,9 +1893,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ sw(v0, location);
if (var->IsContextSlot()) {
__ mov(a3, v0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(a1, Operand(Context::SlotOffset(var->index())), a2, a3);
}
} else {
ASSERT(var->IsLookupSlot());
@@ -2161,8 +2121,10 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
__ push(a1);
// Push the strict mode flag. In harmony mode every eval call
// is a strict mode eval call.
- StrictModeFlag strict_mode =
- FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
+ StrictModeFlag strict_mode = strict_mode_flag();
+ if (FLAG_harmony_block_scoping) {
+ strict_mode = kStrictMode;
+ }
__ li(a1, Operand(Smi::FromInt(strict_mode)));
__ push(a1);
@@ -2208,7 +2170,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// context lookup in the runtime system.
Label done;
Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
+ if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
Label slow;
EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
// Push the function and resolve eval.
@@ -2709,23 +2671,18 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ GetObjectType(v0, v0, a1); // Map is now in v0.
__ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ Branch(&function, ge, a1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
+ // Check if the constructor in the map is a function.
__ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
__ GetObjectType(v0, a1, a1);
__ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
@@ -2904,9 +2861,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ mov(a2, v0);
- __ RecordWriteField(
- a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3);
__ bind(&done);
context()->Plug(v0);
@@ -3199,31 +3154,16 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ sw(scratch1, MemOperand(index2, 0));
__ sw(scratch2, MemOperand(index1, 0));
- Label no_remembered_set;
- __ CheckPageFlag(elements,
- scratch1,
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &no_remembered_set);
+ Label new_space;
+ __ InNewSpace(elements, scratch1, eq, &new_space);
// Possible optimization: do a check that both values are Smis
// (or them and test against Smi mask).
- // We are swapping two objects in an array and the incremental marker never
- // pauses in the middle of scanning a single object. Therefore the
- // incremental marker is not disturbed, so we don't need to call the
- // RecordWrite stub that notifies the incremental marker.
- __ RememberedSetHelper(elements,
- index1,
- scratch2,
- kDontSaveFPRegs,
- MacroAssembler::kFallThroughAtEnd);
- __ RememberedSetHelper(elements,
- index2,
- scratch2,
- kDontSaveFPRegs,
- MacroAssembler::kFallThroughAtEnd);
+ __ mov(scratch1, elements);
+ __ RecordWriteHelper(elements, index1, scratch2);
+ __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
- __ bind(&no_remembered_set);
+ __ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
__ Drop(3);
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
@@ -3981,14 +3921,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
VisitForTypeofValue(expr);
}
@@ -4028,11 +3964,10 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(v0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ GetObjectType(v0, v0, a1);
- __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
- Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
- if_true, if_false, fall_through);
+ __ GetObjectType(v0, a1, v0); // Leave map in a1.
+ Split(ge, v0, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE),
+ if_true, if_false, fall_through);
+
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(v0, if_false);
if (!FLAG_harmony_typeof) {
@@ -4051,7 +3986,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
- context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
}
@@ -4059,12 +4005,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
+
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4072,6 +4015,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+ context()->Plug(if_true, if_false);
+ return;
+ }
+
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
@@ -4156,9 +4106,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Comment cmnt(masm_, "[ CompareToNull");
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4166,21 +4115,15 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(sub_expr);
+ VisitForAccumulatorValue(expr->expression());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
__ mov(a0, result_register());
- __ LoadRoot(a1, nil_value);
- if (expr->op() == Token::EQ_STRICT) {
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ if (expr->is_strict()) {
Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
} else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
__ Branch(if_true, eq, a0, Operand(a1));
- __ LoadRoot(a1, other_nil_value);
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ Branch(if_true, eq, a0, Operand(a1));
__ And(at, a0, Operand(kSmiTagMask));
__ Branch(if_false, eq, at, Operand(zero_reg));
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index fb33eb6651..a76c215a48 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -210,8 +210,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWrite(elements, scratch2, scratch1);
}
@@ -505,22 +504,21 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack.
__ lw(a3, MemOperand(sp, argc*kPointerSize));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Push the receiver and the name of the function.
- __ Push(a3, a2);
+ // Push the receiver and the name of the function.
+ __ Push(a3, a2);
- // Call the entry.
- __ li(a0, Operand(2));
- __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+ // Call the entry.
+ __ li(a0, Operand(2));
+ __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
- CEntryStub stub(1);
- __ CallStub(&stub);
+ CEntryStub stub(1);
+ __ CallStub(&stub);
- // Move result to a1 and leave the internal frame.
- __ mov(a1, v0);
- }
+ // Move result to a1 and leave the internal frame.
+ __ mov(a1, v0);
+ __ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@@ -651,13 +649,12 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a2); // Save the key.
- __ Push(a1, a2); // Pass the receiver and the key.
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(a2); // Restore the key.
- }
+ __ EnterInternalFrame();
+ __ push(a2); // Save the key.
+ __ Push(a1, a2); // Pass the receiver and the key.
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(a2); // Restore the key.
+ __ LeaveInternalFrame();
__ mov(a1, v0);
__ jmp(&do_call);
@@ -905,9 +902,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
MemOperand mapped_location =
GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
__ sw(a0, mapped_location);
- __ Addu(t2, a3, t1);
- __ mov(t5, a0);
- __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs);
+ // Verify mapped_location MemOperand is register, with no offset.
+ ASSERT_EQ(mapped_location.offset(), 0);
+ __ RecordWrite(a3, mapped_location.rm(), t5);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&notin);
@@ -915,9 +912,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
MemOperand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
__ sw(a0, unmapped_location);
- __ Addu(t2, a3, t0);
- __ mov(t5, a0);
- __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs);
+ ASSERT_EQ(unmapped_location.offset(), 0);
+ __ RecordWrite(a3, unmapped_location.rm(), t5);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&slow);
@@ -1205,144 +1201,109 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- a2 : receiver
// -- ra : return address
// -----------------------------------
- Label slow, array, extra, check_if_double_array;
- Label fast_object_with_map_check, fast_object_without_map_check;
- Label fast_double_with_map_check, fast_double_without_map_check;
+
+ Label slow, fast, array, extra, exit;
// Register usage.
Register value = a0;
Register key = a1;
Register receiver = a2;
Register elements = a3; // Elements array of the receiver.
- Register elements_map = t2;
- Register receiver_map = t3;
- // t0 and t1 are used as general scratch registers.
+ // t0 is used as ip in the arm version.
+ // t3-t4 are used as temporaries.
// Check that the key is a smi.
__ JumpIfNotSmi(key, &slow);
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
+
// Get the map of the object.
- __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
__ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
__ Branch(&slow, ne, t0, Operand(zero_reg));
// Check if the object is a JS array or not.
- __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
+ __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
+
+ __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
// Check that the object is some kind of JSObject.
- __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
+ __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
// Object case: Check key against length in the elements array.
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check that the object is in fast mode and writable.
+ __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&slow, ne, t3, Operand(t0));
// Check array bounds. Both the key and the length of FixedArray are smis.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&fast_object_with_map_check, lo, key, Operand(t0));
+ __ Branch(&fast, lo, key, Operand(t0));
+ // Fall thru to slow if un-tagged index >= length.
// Slow case, handle jump to runtime.
__ bind(&slow);
+
// Entry registers are intact.
// a0: value.
// a1: key.
// a2: receiver.
+
GenerateRuntimeSetProperty(masm, strict_mode);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
+
__ bind(&extra);
- // Condition code from comparing key and array length is still available.
// Only support writing to array[array.length].
__ Branch(&slow, ne, key, Operand(t0));
// Check for room in the elements backing store.
// Both the key and the length of FixedArray are smis.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(&slow, hs, key, Operand(t0));
- __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(&check_if_double_array, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
// Calculate key + 1 as smi.
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(t0, key, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Branch(&fast_object_without_map_check);
-
- __ bind(&check_if_double_array);
- __ Branch(&slow, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- // Add 1 to key, and go to common element store code for doubles.
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(t0, key, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ jmp(&fast_double_without_map_check);
+ STATIC_ASSERT(0 == kSmiTag);
+ __ Addu(t3, key, Operand(Smi::FromInt(1)));
+ __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Branch(&fast);
+
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
// is the length is always a smi.
+
__ bind(&array);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&slow, ne, t3, Operand(t0));
// Check the key against the length in the array.
__ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Branch(&extra, hs, key, Operand(t0));
// Fall through to fast case.
- __ bind(&fast_object_with_map_check);
- Register scratch_value = t0;
- Register address = t1;
- __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(&fast_double_with_map_check, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ bind(&fast_object_without_map_check);
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
- __ sw(value, MemOperand(address));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value);
-
- __ bind(&non_smi_value);
- // Escape to slow case when writing non-smi into smi-only array.
- __ CheckFastObjectElements(receiver_map, scratch_value, &slow);
- // Fast elements array, store the value to the elements backing store.
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
- __ sw(value, MemOperand(address));
+ __ bind(&fast);
+ // Fast case, store the value to the elements backing store.
+ __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t4, t4, Operand(t1));
+ __ sw(value, MemOperand(t4));
+ // Skip write barrier if the written value is a smi.
+ __ JumpIfSmi(value, &exit);
+
// Update write barrier for the elements array address.
- __ mov(v0, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- value,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
+ __ Subu(t3, t4, Operand(elements));
- __ bind(&fast_double_with_map_check);
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ Branch(&slow, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- receiver,
- elements,
- t0,
- t1,
- t2,
- t3,
- &slow);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value);
+ __ RecordWrite(elements, Operand(t3), t4, t5);
+ __ bind(&exit);
+
+ __ mov(v0, a0); // Return the value written.
+ __ Ret();
}
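
The rewritten KeyedStoreIC::GenerateGeneric follows the usual fast path: bail out to the runtime unless the key is a smi and the receiver has fast, writable FixedArray elements, bounds-check the key (allowing a JS array to grow by exactly one element through the extra path), store the value, and run the write barrier only for non-smi values. A self-contained sketch of just the bounds/growth/write-barrier logic, leaving out the map and access checks (stand-in types, not V8's):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using Tagged = uint32_t;                          // smi convention: low bit clear
    inline bool IsSmi(Tagged v) { return (v & 1u) == 0; }

    struct FastArray {
      std::vector<Tagged> elements;                   // backing store (capacity)
      size_t length = 0;                              // JS array length <= capacity
    };

    // Returns false when the store has to go to the runtime.
    bool KeyedStoreFast(FastArray* arr, size_t key, Tagged value,
                        void (*record_write)(FastArray*, size_t)) {
      if (key < arr->length) {
        // In-bounds store into existing elements.
      } else if (key == arr->length && key < arr->elements.size()) {
        arr->length = key + 1;                        // the 'extra' capacity path
      } else {
        return false;                                 // needs growth or dictionary mode
      }
      arr->elements[key] = value;
      if (!IsSmi(value)) record_write(arr, key);      // skip the barrier for smis
      return true;
    }
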
@@ -1611,8 +1572,7 @@ void PatchInlinedSmiCode(Address address) {
// If the instruction following the call is not a andi at, rx, #yyy, nothing
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
- if (!(Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
+ if (!Assembler::IsAndImmediate(instr)) {
return;
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 2964fbc86c..4c48ef183c 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -42,8 +42,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- has_frame_(false) {
+ allow_stub_calls_(true) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@@ -81,15 +80,46 @@ void MacroAssembler::StoreRoot(Register source,
}
+void MacroAssembler::RecordWriteHelper(Register object,
+ Register address,
+ Register scratch) {
+ if (emit_debug_code()) {
+ // Check that the object is not in new space.
+ Label not_in_new_space;
+ InNewSpace(object, scratch, ne, &not_in_new_space);
+ Abort("new-space object passed to RecordWriteHelper");
+ bind(&not_in_new_space);
+ }
+
+ // Calculate page address: Clear bits from 0 to kPageSizeBits.
+ if (mips32r2) {
+ Ins(object, zero_reg, 0, kPageSizeBits);
+ } else {
+ // The Ins macro is slow on r1, so use shifts instead.
+ srl(object, object, kPageSizeBits);
+ sll(object, object, kPageSizeBits);
+ }
+
+ // Calculate region number.
+ Ext(address, address, Page::kRegionSizeLog2,
+ kPageSizeBits - Page::kRegionSizeLog2);
+
+ // Mark region dirty.
+ lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ li(at, Operand(1));
+ sllv(at, at, address);
+ or_(scratch, scratch, at);
+ sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+}
+
+
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
ASSERT(num_unsaved >= 0);
- if (num_unsaved > 0) {
- Subu(sp, sp, Operand(num_unsaved * kPointerSize));
- }
+ Subu(sp, sp, Operand(num_unsaved * kPointerSize));
MultiPush(kSafepointSavedRegisters);
}
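
The reinstated RecordWriteHelper is the 3.6-era write barrier: mask the low kPageSizeBits bits off the object pointer to find its page (the Ins or srl/sll sequence), extract the region number from the slot address, and set that region's bit in the page's dirty-flag word so the collector rescans it. A host-side sketch of the same bit arithmetic (the constants are illustrative, not V8's actual values):

    #include <cstdint>

    const uint32_t kPageSizeBits   = 13;  // illustrative: 8 KB pages
    const uint32_t kRegionSizeLog2 = 8;   // illustrative: 256-byte regions, 32 per page

    struct Page { uint32_t dirty_regions; };  // one dirty bit per region

    // 'page' is the header of the page containing the written-to object.
    void MarkRegionDirty(Page* page, uintptr_t slot_address) {
      uintptr_t offset_in_page = slot_address & ((1u << kPageSizeBits) - 1);
      uint32_t region = static_cast<uint32_t>(offset_in_page >> kRegionSizeLog2);
      page->dirty_regions |= (1u << region);  // collector rescans dirty regions
    }
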
@@ -97,9 +127,7 @@ void MacroAssembler::PushSafepointRegisters() {
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
MultiPop(kSafepointSavedRegisters);
- if (num_unsaved > 0) {
- Addu(sp, sp, Operand(num_unsaved * kPointerSize));
- }
+ Addu(sp, sp, Operand(num_unsaved * kPointerSize));
}
@@ -152,7 +180,6 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
- UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
@@ -160,6 +187,8 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
}
+
+
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
@@ -171,53 +200,38 @@ void MacroAssembler::InNewSpace(Register object,
}
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASSERT(!AreAliased(value, dst, t8, object));
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
+// Will clobber 4 registers: object, scratch0, scratch1, at. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done);
- }
+ Label done;
- // Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch0, eq, &done);
- Addu(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
- Label ok;
- And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
- Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Unaligned cell in write barrier");
- bind(&ok);
- }
+ // Add offset into the object.
+ Addu(scratch0, object, offset);
- RecordWrite(object,
- dst,
- value,
- ra_status,
- save_fp,
- remembered_set_action,
- OMIT_SMI_CHECK);
+ // Record the actual write.
+ RecordWriteHelper(object, scratch0, scratch1);
bind(&done);
- // Clobber clobbered input registers when running with the debug-code flag
+ // Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
- li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+ li(object, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
}
}
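
The Operand-offset RecordWrite above returns early when the object lives in new space, since region marks are only kept for old-space pages and new space is scanned wholesale anyway; otherwise it adds the offset and delegates to RecordWriteHelper, and under --debug-code it overwrites its inputs with kZapValue so any stale use fails loudly. The control flow in one small function (the callbacks are assumptions standing in for InNewSpace and RecordWriteHelper):

    #include <cstdint>

    void RecordWrite(uintptr_t object, uintptr_t slot,
                     bool (*in_new_space)(uintptr_t),
                     void (*mark_region_dirty)(uintptr_t, uintptr_t)) {
      if (in_new_space(object)) return;   // new space needs no region mark
      mark_region_dirty(object, slot);    // old space: remember the dirty region
    }
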
@@ -227,97 +241,29 @@ void MacroAssembler::RecordWriteField(
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register value,
- RAStatus ra_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASSERT(!AreAliased(object, address, value, t8));
- ASSERT(!AreAliased(object, address, value, t9));
+ Register scratch) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
+ ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- And(t8, value, Operand(kSmiTagMask));
- Branch(&done, eq, t8, Operand(zero_reg));
- }
-
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- eq,
- &done);
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch, eq, &done);
// Record the actual write.
- if (ra_status == kRAHasNotBeenSaved) {
- push(ra);
- }
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
- if (ra_status == kRAHasNotBeenSaved) {
- pop(ra);
- }
+ RecordWriteHelper(object, address, scratch);
bind(&done);
- // Clobber clobbered registers when running with the debug-code flag
+ // Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
- li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
- }
-}
-
-
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address,
- Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
- Label done;
- if (FLAG_debug_code) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok);
- stop("Remembered set pointer is in new space");
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- li(t8, Operand(store_buffer));
- lw(scratch, MemOperand(t8));
- // Store pointer to buffer and increment buffer top.
- sw(address, MemOperand(scratch));
- Addu(scratch, scratch, kPointerSize);
- // Write back new top of buffer.
- sw(scratch, MemOperand(t8));
- // Call stub on end of buffer.
- // Check for end of buffer.
- And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
- if (and_then == kFallThroughAtEnd) {
- Branch(&done, eq, t8, Operand(zero_reg));
- } else {
- ASSERT(and_then == kReturnAtEnd);
- Ret(eq, t8, Operand(zero_reg));
- }
- push(ra);
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(fp_mode);
- CallStub(&store_buffer_overflow);
- pop(ra);
- bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
+ li(object, Operand(BitCast<int32_t>(kZapValue)));
+ li(address, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch, Operand(BitCast<int32_t>(kZapValue)));
}
}
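
The removed RememberedSetHelper belongs to the newer store-buffer collector: it writes the slot address at the store-buffer top, bumps the top pointer, and calls the overflow stub once a designated bit of the new top address becomes set, which works because the buffer is placed so that crossing its end flips exactly that bit. A sketch of the bump-pointer-with-overflow-bit scheme (the constant and callback are assumptions):

    #include <cstdint>

    const uintptr_t kStoreBufferOverflowBit = 1u << 16;  // illustrative

    struct StoreBuffer {
      uintptr_t* top;                      // next free slot
      void (*on_overflow)(StoreBuffer*);   // hand the full buffer to the GC
    };

    void RememberSlot(StoreBuffer* sb, uintptr_t slot_address) {
      *sb->top++ = slot_address;                                   // store, then bump
      if (reinterpret_cast<uintptr_t>(sb->top) & kStoreBufferOverflowBit) {
        sb->on_overflow(sb);                                       // buffer is full
      }
    }
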
@@ -761,7 +707,7 @@ void MacroAssembler::MultiPush(RegList regs) {
int16_t stack_offset = num_to_push * kPointerSize;
Subu(sp, sp, Operand(stack_offset));
- for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ for (int16_t i = kNumRegisters; i > 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
sw(ToRegister(i), MemOperand(sp, stack_offset));
@@ -800,7 +746,7 @@ void MacroAssembler::MultiPop(RegList regs) {
void MacroAssembler::MultiPopReversed(RegList regs) {
int16_t stack_offset = 0;
- for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ for (int16_t i = kNumRegisters; i > 0; i--) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
@@ -816,7 +762,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
int16_t stack_offset = num_to_push * kDoubleSize;
Subu(sp, sp, Operand(stack_offset));
- for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ for (int16_t i = kNumRegisters; i > 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
@@ -858,7 +804,7 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
CpuFeatures::Scope scope(FPU);
int16_t stack_offset = 0;
- for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ for (int16_t i = kNumRegisters; i > 0; i--) {
if ((regs & (1 << i)) != 0) {
ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
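
MultiPush/MultiPop and their FPU variants treat a RegList as a plain bitmask: reserve stack space for the number of set bits, then walk the registers from the highest code downwards, storing each selected one at a descending offset (the two versions above differ only in how the loop bounds are written; the sketch walks bits 31..0). A stand-alone sketch of the push half over a uint32_t mask:

    #include <bitset>
    #include <cstdint>

    const int kNumRegisters = 32;

    // Stores the selected registers below 'sp' so the highest-numbered register
    // lands at the highest address; returns the new stack pointer.
    uint32_t* MultiPush(uint32_t reglist, const uint32_t regfile[kNumRegisters],
                        uint32_t* sp) {
      int num_to_push = static_cast<int>(std::bitset<32>(reglist).count());
      int slot = num_to_push;                // word offsets instead of byte offsets
      sp -= num_to_push;                     // Subu(sp, sp, stack_offset)
      for (int i = kNumRegisters - 1; i >= 0; i--) {
        if (reglist & (1u << i)) {
          --slot;
          sp[slot] = regfile[i];             // sw(ToRegister(i), MemOperand(sp, offset))
        }
      }
      return sp;
    }
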
@@ -868,21 +814,6 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
}
-void MacroAssembler::FlushICache(Register address, unsigned instructions) {
- RegList saved_regs = kJSCallerSaved | ra.bit();
- MultiPush(saved_regs);
- AllowExternalCallThatCantCauseGC scope(this);
-
- // Save to a0 in case address == t0.
- Move(a0, address);
- PrepareCallCFunction(2, t0);
-
- li(a1, instructions * kInstrSize);
- CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
- MultiPop(saved_regs);
-}
-
-
void MacroAssembler::Ext(Register rt,
Register rs,
uint16_t pos,
@@ -1009,9 +940,11 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
// Test if scratch > fd.
- // If fd < 2^31 we can convert it normally.
+ c(OLT, D, fd, scratch);
+
Label simple_convert;
- BranchF(&simple_convert, NULL, lt, fd, scratch);
+ // If fd < 2^31 we can convert it normally.
+ bc1t(&simple_convert);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
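
Trunc_uw_d produces an unsigned 32-bit result even though the FPU only truncates to the signed 32-bit range: values below 2^31 are converted directly, larger ones have 2^31 subtracted before truncation and added back afterwards, which is what the compare-and-branch above sets up. The same trick in plain C++ (meaningful only for inputs in [0, 2^32)):

    #include <cstdint>

    uint32_t TruncUwD(double fd) {
      const double k2Pow31 = 2147483648.0;  // 2^31
      if (fd < k2Pow31) {
        return static_cast<uint32_t>(static_cast<int32_t>(fd));  // simple_convert path
      }
      // Subtract 2^31, truncate within signed range, add 2^31 back.
      return static_cast<uint32_t>(static_cast<int32_t>(fd - k2Pow31)) + 0x80000000u;
    }
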
@@ -1031,102 +964,6 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
}
-void MacroAssembler::BranchF(Label* target,
- Label* nan,
- Condition cc,
- FPURegister cmp1,
- FPURegister cmp2,
- BranchDelaySlot bd) {
- if (cc == al) {
- Branch(bd, target);
- return;
- }
-
- ASSERT(nan || target);
- // Check for unordered (NaN) cases.
- if (nan) {
- c(UN, D, cmp1, cmp2);
- bc1t(nan);
- }
-
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- // Unsigned conditions are treated as their signed counterpart.
- switch (cc) {
- case Uless:
- case less:
- c(OLT, D, cmp1, cmp2);
- bc1t(target);
- break;
- case Ugreater:
- case greater:
- c(ULE, D, cmp1, cmp2);
- bc1f(target);
- break;
- case Ugreater_equal:
- case greater_equal:
- c(ULT, D, cmp1, cmp2);
- bc1f(target);
- break;
- case Uless_equal:
- case less_equal:
- c(OLE, D, cmp1, cmp2);
- bc1t(target);
- break;
- case eq:
- c(EQ, D, cmp1, cmp2);
- bc1t(target);
- break;
- case ne:
- c(EQ, D, cmp1, cmp2);
- bc1f(target);
- break;
- default:
- CHECK(0);
- };
- }
-
- if (bd == PROTECT) {
- nop();
- }
-}
-
-
-void MacroAssembler::Move(FPURegister dst, double imm) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
- static const DoubleRepresentation minus_zero(-0.0);
- static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value(imm);
- // Handle special values first.
- bool force_load = dst.is(kDoubleRegZero);
- if (value.bits == zero.bits && !force_load) {
- mov_d(dst, kDoubleRegZero);
- } else if (value.bits == minus_zero.bits && !force_load) {
- neg_d(dst, kDoubleRegZero);
- } else {
- uint32_t lo, hi;
- DoubleAsTwoUInt32(imm, &lo, &hi);
- // Move the low part of the double into the lower of the corresponding FPU
- // register of FPU register pair.
- if (lo != 0) {
- li(at, Operand(lo));
- mtc1(at, dst);
- } else {
- mtc1(zero_reg, dst);
- }
- // Move the high part of the double into the higher of the corresponding FPU
- // register of FPU register pair.
- if (hi != 0) {
- li(at, Operand(hi));
- mtc1(at, dst.high());
- } else {
- mtc1(zero_reg, dst.high());
- }
- }
-}
-
-
// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
// 32bits signed integer range.
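
The removed BranchF lowers each generic condition to a c.cond.d compare plus either bc1t (branch on the condition bit set) or bc1f (branch on it clear); greater, for example, becomes "not unordered-or-less-or-equal" (c(ULE) then bc1f), and an optional leading c(UN)/bc1t routes NaNs to the nan label. The mapping as a small table (the enums are stand-ins for the assembler's):

    enum Cond { kLess, kLessEqual, kGreater, kGreaterEqual, kEq, kNe };
    enum FpuCompare { OLT, OLE, ULT, ULE, EQ_ };      // c.cond.d variants
    struct FpuBranch { FpuCompare compare; bool branch_if_true; };

    // Which compare to emit and whether to follow it with bc1t or bc1f.
    FpuBranch MapCondition(Cond cc) {
      switch (cc) {
        case kLess:         return { OLT, true  };   // c(OLT) ; bc1t
        case kLessEqual:    return { OLE, true  };   // c(OLE) ; bc1t
        case kGreater:      return { ULE, false };   // c(ULE) ; bc1f
        case kGreaterEqual: return { ULT, false };   // c(ULT) ; bc1f
        case kEq:           return { EQ_, true  };   // c(EQ)  ; bc1t
        case kNe:           return { EQ_, false };   // c(EQ)  ; bc1f
      }
      return { EQ_, true };                           // unreachable
    }
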
@@ -1225,53 +1062,6 @@ void MacroAssembler::ConvertToInt32(Register source,
}
-void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
- FPURegister result,
- DoubleRegister double_input,
- Register scratch1,
- Register except_flag,
- CheckForInexactConversion check_inexact) {
- ASSERT(CpuFeatures::IsSupported(FPU));
- CpuFeatures::Scope scope(FPU);
-
- int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
-
- if (check_inexact == kDontCheckForInexactConversion) {
- // Ingore inexact exceptions.
- except_mask &= ~kFCSRInexactFlagMask;
- }
-
- // Save FCSR.
- cfc1(scratch1, FCSR);
- // Disable FPU exceptions.
- ctc1(zero_reg, FCSR);
-
- // Do operation based on rounding mode.
- switch (rounding_mode) {
- case kRoundToNearest:
- round_w_d(result, double_input);
- break;
- case kRoundToZero:
- trunc_w_d(result, double_input);
- break;
- case kRoundToPlusInf:
- ceil_w_d(result, double_input);
- break;
- case kRoundToMinusInf:
- floor_w_d(result, double_input);
- break;
- } // End of switch-statement.
-
- // Retrieve FCSR.
- cfc1(except_flag, FCSR);
- // Restore FCSR.
- ctc1(scratch1, FCSR);
-
- // Check for fpu exceptions.
- And(except_flag, except_flag, Operand(except_mask));
-}
-
-
void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
Register input_high,
Register input_low,
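
EmitFPUTruncate (removed above) saves the FCSR, disables FPU exceptions, performs the rounding-mode-specific conversion (round/trunc/ceil/floor), then reads the accumulated flag bits back so the caller can tell whether the conversion was exact and in range; callers that accept inexact results simply mask that flag out. A host-C++ analogue of the save/convert/inspect/restore pattern using the standard <cfenv> facility (not V8 code):

    #include <cfenv>
    #include <cmath>

    // 'rounding_mode' is one of FE_TONEAREST, FE_TOWARDZERO, FE_UPWARD, FE_DOWNWARD.
    long ConvertWithFlags(double input, int rounding_mode, int* raised_flags) {
      std::fenv_t saved;
      std::fegetenv(&saved);                 // save the FP environment (the FCSR analogue)
      std::fesetround(rounding_mode);
      std::feclearexcept(FE_ALL_EXCEPT);
      long result = std::lrint(input);       // raises FE_INEXACT / FE_INVALID as needed
      *raised_flags = std::fetestexcept(FE_INEXACT | FE_INVALID);
      std::fesetenv(&saved);                 // restore the caller's environment
      return result;
    }
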
@@ -1358,21 +1148,22 @@ void MacroAssembler::EmitECMATruncate(Register result,
FPURegister double_input,
FPURegister single_scratch,
Register scratch,
- Register scratch2,
- Register scratch3) {
+ Register input_high,
+ Register input_low) {
CpuFeatures::Scope scope(FPU);
- ASSERT(!scratch2.is(result));
- ASSERT(!scratch3.is(result));
- ASSERT(!scratch3.is(scratch2));
+ ASSERT(!input_high.is(result));
+ ASSERT(!input_low.is(result));
+ ASSERT(!input_low.is(input_high));
ASSERT(!scratch.is(result) &&
- !scratch.is(scratch2) &&
- !scratch.is(scratch3));
+ !scratch.is(input_high) &&
+ !scratch.is(input_low));
ASSERT(!single_scratch.is(double_input));
Label done;
Label manual;
// Clear cumulative exception flags and save the FCSR.
+ Register scratch2 = input_high;
cfc1(scratch2, FCSR);
ctc1(zero_reg, FCSR);
// Try a conversion to a signed integer.
@@ -1389,8 +1180,6 @@ void MacroAssembler::EmitECMATruncate(Register result,
Branch(&done, eq, scratch, Operand(zero_reg));
// Load the double value and perform a manual truncation.
- Register input_high = scratch2;
- Register input_low = scratch3;
Move(input_low, input_high, double_input);
EmitOutOfInt32RangeTruncate(result,
input_high,
@@ -1422,6 +1211,15 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+bool MacroAssembler::UseAbsoluteCodePointers() {
+ if (is_trampoline_emitted()) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
BranchShort(offset, bdslot);
}
@@ -1435,18 +1233,11 @@ void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
- if (L->is_bound()) {
- if (is_near(L)) {
- BranchShort(L, bdslot);
- } else {
- Jr(L, bdslot);
- }
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Jr(L, bdslot);
} else {
- if (is_trampoline_emitted()) {
- Jr(L, bdslot);
- } else {
- BranchShort(L, bdslot);
- }
+ BranchShort(L, bdslot);
}
}
@@ -1454,26 +1245,15 @@ void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
- if (L->is_bound()) {
- if (is_near(L)) {
- BranchShort(L, cond, rs, rt, bdslot);
- } else {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
- }
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
} else {
- if (is_trampoline_emitted()) {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
- } else {
- BranchShort(L, cond, rs, rt, bdslot);
- }
+ BranchShort(L, cond, rs, rt, bdslot);
}
}
@@ -1496,8 +1276,8 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
Register scratch = at;
if (rt.is_reg()) {
- // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
- // rt.
+ // We don't want any other register but scratch clobbered.
+ ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
r2 = rt.rm_;
switch (cond) {
case cc_always:
@@ -1999,18 +1779,11 @@ void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
- if (L->is_bound()) {
- if (is_near(L)) {
- BranchAndLinkShort(L, bdslot);
- } else {
- Jalr(L, bdslot);
- }
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Jalr(L, bdslot);
} else {
- if (is_trampoline_emitted()) {
- Jalr(L, bdslot);
- } else {
- BranchAndLinkShort(L, bdslot);
- }
+ BranchAndLinkShort(L, bdslot);
}
}
@@ -2018,26 +1791,15 @@ void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
- if (L->is_bound()) {
- if (is_near(L)) {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
- } else {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jalr(L, bdslot);
- bind(&skip);
- }
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jalr(L, bdslot);
+ bind(&skip);
} else {
- if (is_trampoline_emitted()) {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jalr(L, bdslot);
- bind(&skip);
- } else {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
- }
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
}
}
@@ -2544,10 +2306,10 @@ void MacroAssembler::Push(Handle<Object> handle) {
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
+ ASSERT(allow_stub_calls());
mov(a0, zero_reg);
li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
@@ -3210,140 +2972,15 @@ void MacroAssembler::CopyBytes(Register src,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
- Branch(&entry);
- bind(&loop);
- sw(filler, MemOperand(start_offset));
- Addu(start_offset, start_offset, kPointerSize);
- bind(&entry);
- Branch(&loop, lt, start_offset, Operand(end_offset));
-}
-
-
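// The removed InitializeFieldsWithFiller is a plain word-by-word fill loop;
// an equivalent standalone C++ sketch (pointer types are hypothetical, one
// word == 4 bytes on this 32-bit target):
#include <cstdint>

static void InitializeFieldsWithFiller(uint32_t* start_offset,
                                       uint32_t* end_offset,
                                       uint32_t filler) {
  // Mirrors the assembly: jump to the test first, then store and advance by
  // one pointer-sized word while start_offset < end_offset.
  while (start_offset < end_offset) {
    *start_offset++ = filler;
  }
}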
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 0);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
}
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, ls, scratch,
- Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastElementValue));
-}
-
-
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register receiver_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail) {
- Label smi_value, maybe_nan, have_double_value, is_nan, done;
- Register mantissa_reg = scratch2;
- Register exponent_reg = scratch3;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- CheckMap(value_reg,
- scratch1,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
- lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
-
- lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- bind(&have_double_value);
- sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- Addu(scratch1, scratch1, elements_reg);
- sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- sw(exponent_reg, FieldMemOperand(scratch1, offset));
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
- lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
- bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- jmp(&have_double_value);
-
- bind(&smi_value);
- Addu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- Addu(scratch1, scratch1, scratch2);
- // scratch1 is now effective address of the double element
-
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = receiver_reg;
- SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- f0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- f2);
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- sdc1(f0, MemOperand(scratch1, 0));
- } else {
- sw(mantissa_reg, MemOperand(scratch1, 0));
- sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
- bind(&done);
-}
-
-
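// Sketch of the double-element addressing used in the smi path above: the key
// register holds a smi (value << 1), so shifting it left by
// kDoubleSizeLog2 - kSmiTagSize == 3 - 1 == 2 more bits yields index * 8, the
// byte offset of an 8-byte element. kHeapObjectTag == 1; the header size is
// left as a parameter rather than hard-coding the FixedDoubleArray layout.
#include <cstdint>

static uintptr_t DoubleElementAddress(uintptr_t tagged_elements_ptr,
                                      int32_t smi_key,
                                      int header_size) {
  const int kHeapObjectTag = 1;
  uintptr_t base = tagged_elements_ptr + header_size - kHeapObjectTag;
  return base + (static_cast<uint32_t>(smi_key) << 2);  // smi_key == index*2.
}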
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -3534,18 +3171,13 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
call_wrapper, call_kind);
if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
SetCallKind(t1, call_kind);
Call(code);
- call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
SetCallKind(t1, call_kind);
@@ -3563,9 +3195,6 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
RelocInfo::Mode rmode,
InvokeFlag flag,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
Label done;
InvokePrologue(expected, actual, code, no_reg, &done, flag,
@@ -3588,9 +3217,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
// Contract with called JS functions requires that function is passed in a1.
ASSERT(function.is(a1));
Register expected_reg = a2;
@@ -3613,9 +3239,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
ASSERT(function->is_compiled());
// Get the function and setup the context.
@@ -3626,11 +3249,7 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, NullCallWrapper(), call_kind);
+ UNIMPLEMENTED_MIPS();
} else {
InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
}
@@ -3730,14 +3349,14 @@ void MacroAssembler::GetObjectType(Register object,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
}
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3749,7 +3368,7 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
@@ -3758,6 +3377,7 @@ MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
Condition cond,
Register r1,
const Operand& r2) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3866,12 +3486,6 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
}
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
addiu(sp, sp, num_arguments * kPointerSize);
@@ -3952,16 +3566,7 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
ASSERT(!overflow_dst.is(scratch));
ASSERT(!overflow_dst.is(left));
ASSERT(!overflow_dst.is(right));
-
- if (left.is(right) && dst.is(left)) {
- ASSERT(!dst.is(t9));
- ASSERT(!scratch.is(t9));
- ASSERT(!left.is(t9));
- ASSERT(!right.is(t9));
- ASSERT(!overflow_dst.is(t9));
- mov(t9, right);
- right = t9;
- }
+ ASSERT(!left.is(right));
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
@@ -3994,17 +3599,10 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst,
ASSERT(!overflow_dst.is(scratch));
ASSERT(!overflow_dst.is(left));
ASSERT(!overflow_dst.is(right));
+ ASSERT(!left.is(right));
ASSERT(!scratch.is(left));
ASSERT(!scratch.is(right));
- // This happens with some crankshaft code. Since Subu works fine if
- // left == right, let's not make that restriction here.
- if (left.is(right)) {
- mov(dst, zero_reg);
- mov(overflow_dst, zero_reg);
- return;
- }
-
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
subu(dst, left, right); // Left is overwritten.
@@ -4053,7 +3651,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
li(a0, Operand(function->nargs));
li(a1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1, kSaveFPRegs);
+ CEntryStub stub(1);
+ stub.SaveDoubles();
CallStub(&stub);
}
@@ -4123,9 +3722,6 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
GetBuiltinEntry(t9, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(t9));
@@ -4258,20 +3854,14 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ AllowStubCallsScope allow_scope(this, true);
li(a0, Operand(p0));
push(a0);
li(a0, Operand(Smi::FromInt(p1 - p0)));
push(a0);
- // Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
- } else {
- CallRuntime(Runtime::kAbort, 2);
- }
+ CallRuntime(Runtime::kAbort, 2);
// Will not return here.
if (is_trampoline_pool_blocked()) {
// If the calling code cares about the exact number of
@@ -4655,23 +4245,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
static const int kRegisterPassedArguments = 4;
-int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments) {
- int stack_passed_words = 0;
- num_reg_arguments += 2 * num_double_arguments;
-
- // Up to four simple arguments are passed in registers a0..a3.
- if (num_reg_arguments > kRegisterPassedArguments) {
- stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
- }
- stack_passed_words += kCArgSlotCount;
- return stack_passed_words;
-}
-
-
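// Standalone sketch of the removed CalculateStackPassedWords. It mirrors the
// O32 convention used here: each double argument counts as two words, the
// first four words travel in a0..a3, and four argument slots are always
// reserved on the stack. kCArgSlotCount == 4 is an assumption taken from that
// convention.
static int CalculateStackPassedWords(int num_reg_arguments,
                                     int num_double_arguments) {
  const int kRegisterPassedArguments = 4;  // a0..a3.
  const int kCArgSlotCount = 4;            // Reserved O32 slots (assumed).
  int words = num_reg_arguments + 2 * num_double_arguments;
  int stack_passed_words =
      (words > kRegisterPassedArguments) ? words - kRegisterPassedArguments : 0;
  return stack_passed_words + kCArgSlotCount;
}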
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
- int num_double_arguments,
- Register scratch) {
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = ActivationFrameAlignment();
// Up to four simple arguments are passed in registers a0..a3.
@@ -4679,8 +4253,9 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
// mips, even though those argument slots are not normally used.
// Remaining arguments are pushed on the stack, above (higher address than)
// the argument slots.
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
+ int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments) +
+ kCArgSlotCount;
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
@@ -4695,43 +4270,26 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
}
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
- Register scratch) {
- PrepareCallCFunction(num_reg_arguments, 0, scratch);
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments) {
- li(t8, Operand(function));
- CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
-}
-
-
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+ CallCFunctionHelper(no_reg, function, t8, num_arguments);
}
void MacroAssembler::CallCFunction(Register function,
+ Register scratch,
int num_arguments) {
- CallCFunction(function, num_arguments, 0);
+ CallCFunctionHelper(function,
+ ExternalReference::the_hole_value_location(isolate()),
+ scratch,
+ num_arguments);
}
void MacroAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
- ASSERT(has_frame());
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments) {
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
@@ -4759,15 +4317,19 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
- if (!function.is(t9)) {
+ if (function.is(no_reg)) {
+ function = t9;
+ li(function, Operand(function_reference));
+ } else if (!function.is(t9)) {
mov(t9, function);
function = t9;
}
Call(function);
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
+ int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments) +
+ kCArgSlotCount;
if (OS::ActivationFrameAlignment() > kPointerSize) {
lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@@ -4780,235 +4342,6 @@ void MacroAssembler::CallCFunctionHelper(Register function,
#undef BRANCH_ARGS_CHECK
-void MacroAssembler::PatchRelocatedValue(Register li_location,
- Register scratch,
- Register new_value) {
- lw(scratch, MemOperand(li_location));
- // At this point scratch is a lui(at, ...) instruction.
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction to patch should be a lui.",
- scratch, Operand(LUI));
- lw(scratch, MemOperand(li_location));
- }
- srl(t9, new_value, kImm16Bits);
- Ins(scratch, t9, 0, kImm16Bits);
- sw(scratch, MemOperand(li_location));
-
- lw(scratch, MemOperand(li_location, kInstrSize));
- // scratch is now ori(at, ...).
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction to patch should be an ori.",
- scratch, Operand(ORI));
- lw(scratch, MemOperand(li_location, kInstrSize));
- }
- Ins(scratch, new_value, 0, kImm16Bits);
- sw(scratch, MemOperand(li_location, kInstrSize));
-
- // Update the I-cache so the new lui and ori can be executed.
- FlushICache(li_location, 2);
-}
-
-
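// Standalone sketch of the immediate patching done by the removed
// PatchRelocatedValue: a constant materialized by a lui/ori pair keeps its
// upper 16 bits in the lui immediate and its lower 16 bits in the ori
// immediate, so patching rewrites the low half-word of each instruction.
#include <cstdint>

static void PatchLuiOriPair(uint32_t* li_location, uint32_t new_value) {
  const uint32_t kImm16Mask = 0xFFFF;
  // lui at, upper16 -- replace the instruction's 16-bit immediate field.
  li_location[0] = (li_location[0] & ~kImm16Mask) | (new_value >> 16);
  // ori at, at, lower16 -- same, with the low half of the constant.
  li_location[1] = (li_location[1] & ~kImm16Mask) | (new_value & kImm16Mask);
  // The real code then flushes the I-cache over these two instructions.
}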
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met) {
- And(scratch, object, Operand(~Page::kPageAlignmentMask));
- lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
- And(scratch, scratch, Operand(mask));
- Branch(condition_met, cc, scratch, Operand(zero_reg));
-}
-
-
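// Rough sketch of the removed CheckPageFlag: mask the object address down to
// its page header, read the header's flag word, and test the requested bits.
// The page alignment mask and flags offset are parameters here because the
// real MemoryChunk layout is not spelled out in this diff.
#include <cstdint>

static bool PageFlagSet(uintptr_t object_address, uint32_t mask,
                        uintptr_t page_alignment_mask, int flags_offset) {
  uintptr_t page_start = object_address & ~page_alignment_mask;
  uint32_t flags =
      *reinterpret_cast<const uint32_t*>(page_start + flags_offset);
  return (flags & mask) != 0;  // The caller branches on eq/ne against zero.
}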
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- int first_bit,
- int second_bit) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- And(t8, t9, Operand(mask_scratch));
- Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
- // Shift left 1 by adding.
- Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
- Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
- And(t8, t9, Operand(mask_scratch));
- Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
- jmp(&other_color);
-
- bind(&word_boundary);
- lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
- And(t9, t9, Operand(1));
- Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
- bind(&other_color);
-}
-
-
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- ASSERT(!AreAliased(value, scratch, t8, no_reg));
- Label is_data_object;
- lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- Branch(&is_data_object, eq, t8, Operand(scratch));
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(not_data_object, ne, t8, Operand(zero_reg));
- bind(&is_data_object);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
- And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
- Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
- const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- sll(t8, t8, kPointerSizeLog2);
- Addu(bitmap_reg, bitmap_reg, t8);
- li(t8, Operand(1));
- sllv(mask_reg, t8, mask_reg);
-}
-
-
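// Standalone sketch of the address arithmetic in the removed GetMarkBits: one
// mark bit per pointer-sized word, packed into bitmap cells near the start of
// the page. kPointerSizeLog2 == 2 matches this 32-bit target; 32 bits per
// cell and the page-size parameters are assumptions.
#include <cstdint>

struct MarkBitLocation {
  uintptr_t page_start;  // Base the bitmap cell offset is added to.
  uint32_t cell_index;   // Which bitmap cell inside the page.
  uint32_t mask;         // Single bit selecting the word within that cell.
};

static MarkBitLocation GetMarkBits(uintptr_t addr,
                                   uintptr_t page_alignment_mask,
                                   int page_size_bits) {
  const int kPointerSizeLog2 = 2;  // 4-byte words.
  const int kBitsPerCellLog2 = 5;  // 32 mark bits per cell (assumed).
  const int kLowBits = kPointerSizeLog2 + kBitsPerCellLog2;
  MarkBitLocation loc;
  loc.page_start = addr & ~page_alignment_mask;
  uint32_t bit_in_cell =
      (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
  loc.cell_index = static_cast<uint32_t>(
      (addr >> kLowBits) & ((1u << (page_size_bits - kLowBits)) - 1));
  loc.mask = 1u << bit_in_cell;
  return loc;
}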
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label done;
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- And(t8, mask_scratch, load_scratch);
- Branch(&done, ne, t8, Operand(zero_reg));
-
- if (FLAG_debug_code) {
- // Check for impossible bit pattern.
- Label ok;
- // sll may overflow, making the check conservative.
- sll(t8, mask_scratch, 1);
- And(t8, load_scratch, t8);
- Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- {
- Label skip;
- Branch(&skip, ne, t8, Operand(map));
- li(length, HeapNumber::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- And(t8, instance_type, Operand(kExternalStringTag));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- li(length, ExternalString::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- lw(t9, FieldMemOperand(value, String::kLengthOffset));
- And(t8, instance_type, Operand(kStringEncodingMask));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- srl(t9, t9, 1);
- bind(&skip);
- }
- Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- And(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Or(t8, t8, Operand(mask_scratch));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Addu(t8, t8, Operand(length));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
-}
-
-
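// Sketch of the sequential-string size computation in the removed
// EnsureNotWhite: byte length is character count times character size (1 for
// ASCII, 2 for UC16) plus the header, rounded up to the allocation alignment.
// Header size and alignment mask stay parameters since they come from the
// heap layout rather than this diff.
#include <cstdint>

static uint32_t SeqStringSize(uint32_t char_count, bool is_ascii,
                              uint32_t header_size, uint32_t alignment_mask) {
  uint32_t payload = is_ascii ? char_count : char_count * 2;
  // Round up: (x + mask) & ~mask, exactly as the Addu/And pair above does.
  return (payload + header_size + alignment_mask) & ~alignment_mask;
}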
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
lw(descriptors,
@@ -5020,60 +4353,6 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
-void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
- ASSERT(!output_reg.is(input_reg));
- Label done;
- li(output_reg, Operand(255));
- // Normal branch: nop in delay slot.
- Branch(&done, gt, input_reg, Operand(output_reg));
- // Use delay slot in this branch.
- Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
- mov(output_reg, zero_reg); // In delay slot.
- mov(output_reg, input_reg); // Value is in range 0..255.
- bind(&done);
-}
-
-
-void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg) {
- Label above_zero;
- Label done;
- Label in_bounds;
-
- Move(temp_double_reg, 0.0);
- BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
-
- // Double value is less than zero, NaN or Inf, return 0.
- mov(result_reg, zero_reg);
- Branch(&done);
-
- // Double value is >= 255, return 255.
- bind(&above_zero);
- Move(temp_double_reg, 255.0);
- BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
- li(result_reg, Operand(255));
- Branch(&done);
-
- // In 0-255 range, round and truncate.
- bind(&in_bounds);
- round_w_d(temp_double_reg, input_reg);
- mfc1(result_reg, temp_double_reg);
- bind(&done);
-}
-
-
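// Standalone sketch of the removed ClampDoubleToUint8: values at or below
// zero (and NaN) clamp to 0, values above 255 clamp to 255, and everything in
// between is rounded the way round_w_d does -- to nearest, which
// std::nearbyint gives under the default rounding mode (an assumption about
// the host environment).
#include <cmath>
#include <cstdint>

static uint8_t ClampDoubleToUint8(double input) {
  if (!(input > 0.0)) return 0;    // Covers negatives, zero and NaN.
  if (input > 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(input));
}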
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
-}
-
-
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 6f81a4bd6a..5dd012e93e 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -50,16 +50,15 @@ class JumpTarget;
// trying to update gp register for position-independent-code. Whenever
// MIPS generated code calls C code, it must be via t9 register.
-
-// Register aliases.
+// Register aliases.
// cp is assumed to be a callee saved register.
-const Register lithiumScratchReg = s3; // Scratch register.
-const Register lithiumScratchReg2 = s4; // Scratch register.
-const Register condReg = s5; // Simulated (partial) condition code for mips.
const Register roots = s6; // Roots array pointer.
const Register cp = s7; // JavaScript context pointer.
const Register fp = s8_fp; // Alias for fp.
-const DoubleRegister lithiumScratchDouble = f30; // Double scratch register.
+// Registers used for condition evaluation.
+const Register condReg1 = s4;
+const Register condReg2 = s5;
+
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
@@ -91,43 +90,6 @@ enum BranchDelaySlot {
PROTECT
};
-
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-static MemOperand ContextOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-static inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
-}
-
-
-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
- return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate a MemOperand for storing arguments 5..N on the stack
-// when calling CallCFunction().
-static inline MemOperand CFunctionArgumentOperand(int index) {
- ASSERT(index > kCArgSlotCount);
- // Argument 5 takes the slot just past the four Arg-slots.
- int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
- return MemOperand(sp, offset);
-}
-
-
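// Standalone sketch of the offset math in the removed CFunctionArgumentOperand:
// argument 5 sits just above the four reserved argument slots, and each later
// argument is one word higher. kPointerSize == 4 and a four-word slot area
// are assumptions matching the 32-bit O32 convention described above.
static int CFunctionArgumentOffset(int index) {
  const int kPointerSize = 4;
  const int kCArgsSlotsSize = 4 * kPointerSize;  // Four reserved slots.
  // index is 1-based and must be greater than 4 (1..4 go in a0..a3).
  return (index - 5) * kPointerSize + kCArgsSlotsSize;
}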
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -176,22 +138,21 @@ class MacroAssembler: public Assembler {
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- static int CallSize(Register target, COND_ARGS);
+ int CallSize(Register target, COND_ARGS);
void Call(Register target, COND_ARGS);
- static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
- static int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
- COND_ARGS);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
+ COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
COND_ARGS);
void Ret(COND_ARGS);
- inline void Ret(BranchDelaySlot bd, Condition cond = al,
- Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
- Ret(cond, rs, rt, bd);
+ inline void Ret(BranchDelaySlot bd) {
+ Ret(al, zero_reg, Operand(zero_reg), bd);
}
#undef COND_ARGS
@@ -236,8 +197,6 @@ class MacroAssembler: public Assembler {
mtc1(src_high, FPURegister::from_code(dst.code() + 1));
}
- void Move(FPURegister dst, double imm);
-
// Jump unconditionally to given label.
  // We NEED a nop in the branch delay slot, as it is used by v8, for example in
// CodeGenerator::ProcessDeferred().
@@ -247,7 +206,6 @@ class MacroAssembler: public Assembler {
Branch(L);
}
-
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index);
@@ -263,127 +221,39 @@ class MacroAssembler: public Assembler {
Condition cond, Register src1, const Operand& src2);
- // ---------------------------------------------------------------------------
- // GC Support
-
- void IncrementalMarkingRecordWriteHelper(Register object,
- Register value,
- Register address);
-
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, ne, branch);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
+ // Check if object is in new space.
+ // scratch can be object itself, but it will be clobbered.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc, // eq for new space, ne otherwise.
+ Label* branch);
- // Check if an object has a given incremental marking color.
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- int first_bit,
- int second_bit);
- void JumpIfBlack(Register object,
+ // For the page containing |object| mark the region covering [address]
+ // dirty. The object address must be in the first 8K of an allocated page.
+ void RecordWriteHelper(Register object,
+ Register address,
+ Register scratch);
+
+ // For the page containing |object| mark the region covering
+ // [object+offset] dirty. The object address must be in the first 8K
+ // of an allocated page. The 'scratch' registers are used in the
+ // implementation and all 3 registers are clobbered by the
+ // operation, as well as the 'at' register. RecordWrite updates the
+ // write barrier even when storing smis.
+ void RecordWrite(Register object,
+ Operand offset,
Register scratch0,
- Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* object_is_white_and_not_data);
-
-  // Detects conservatively whether an object is data-only, i.e. it does not
-  // need to be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- ra_status,
- save_fp,
- remembered_set_action,
- smi_check);
- }
+ Register scratch1);
- // For a given |object| notify the garbage collector that the slot |address|
- // has been written. |value| is the object being stored. The value and
- // address registers are clobbered by the operation.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ // For the page containing |object| mark the region covering
+ // [address] dirty. The object address must be in the first 8K of an
+ // allocated page. All 3 registers are clobbered by the operation,
+ // as well as the ip register. RecordWrite updates the write barrier
+ // even when storing smis.
+ void RecordWrite(Register object,
+ Register address,
+ Register scratch);
// ---------------------------------------------------------------------------
@@ -647,14 +517,6 @@ class MacroAssembler: public Assembler {
Addu(sp, sp, 2 * kPointerSize);
}
- // Pop three registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Register src3) {
- lw(src3, MemOperand(sp, 0 * kPointerSize));
- lw(src2, MemOperand(sp, 1 * kPointerSize));
- lw(src1, MemOperand(sp, 2 * kPointerSize));
- Addu(sp, sp, 3 * kPointerSize);
- }
-
void Pop(uint32_t count = 1) {
Addu(sp, sp, Operand(count * kPointerSize));
}
@@ -673,17 +535,10 @@ class MacroAssembler: public Assembler {
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
- // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
- // Does not handle errors.
- void FlushICache(Register address, unsigned instructions);
-
// MIPS32 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
- // ---------------------------------------------------------------------------
- // FPU macros. These do not handle special cases like NaN or +- inf.
-
// Convert unsigned word to double.
void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
@@ -692,24 +547,6 @@ class MacroAssembler: public Assembler {
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
- // Wrapper function for the different cmp/branch types.
- void BranchF(Label* target,
- Label* nan,
- Condition cc,
- FPURegister cmp1,
- FPURegister cmp2,
- BranchDelaySlot bd = PROTECT);
-
- // Alternate (inline) version for better readability with USE_DELAY_SLOT.
- inline void BranchF(BranchDelaySlot bd,
- Label* target,
- Label* nan,
- Condition cc,
- FPURegister cmp1,
- FPURegister cmp2) {
- BranchF(target, nan, cc, cmp1, cmp2, bd);
- };
-
// Convert the HeapNumber pointed to by source to a 32bits signed integer
// dest. If the HeapNumber does not fit into a 32bits signed integer branch
// to not_int32 label. If FPU is available double_scratch is used but not
@@ -721,18 +558,6 @@ class MacroAssembler: public Assembler {
FPURegister double_scratch,
Label *not_int32);
- // Truncates a double using a specific rounding mode.
- // The except_flag will contain any exceptions caused by the instruction.
-  // If check_inexact is kDontCheckForInexactConversion, then the inexact
- // exception is masked.
- void EmitFPUTruncate(FPURoundingMode rounding_mode,
- FPURegister result,
- DoubleRegister double_input,
- Register scratch1,
- Register except_flag,
- CheckForInexactConversion check_inexact
- = kDontCheckForInexactConversion);
-
// Helper for EmitECMATruncate.
  // This will truncate a floating-point value outside of the signed 32bit
// integer range to a 32bit signed integer.
@@ -754,6 +579,15 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register scratch3);
+ // -------------------------------------------------------------------------
+ // Activation frames.
+
+ void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+ void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+ void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+ void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
// save_doubles - saves FPU registers on stack, currently disabled.
@@ -780,7 +614,6 @@ class MacroAssembler: public Assembler {
Register map,
Register scratch);
-
// -------------------------------------------------------------------------
// JavaScript invokes.
@@ -869,13 +702,6 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
-  // the end of the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
// -------------------------------------------------------------------------
// Support functions.
@@ -899,31 +725,6 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiOnlyElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
- void StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register receiver_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail);
-
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@@ -953,21 +754,6 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
-
- // Load and check the instance type of an object for being a string.
- // Loads the type into the second argument register.
- // Returns a condition that will be enabled if the object was a string.
- Condition IsObjectStringType(Register obj,
- Register type,
- Register result) {
- lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
- lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
- And(type, type, Operand(kIsNotStringMask));
- ASSERT_EQ(0, kStringTag);
- return eq;
- }
-
-
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
@@ -1093,9 +879,6 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
-
// Before calling a C-function from generated code, align arguments on stack
// and add space for the four mips argument slots.
// After aligning the frame, non-register arguments must be stored on the
@@ -1105,11 +888,7 @@ class MacroAssembler: public Assembler {
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
- void PrepareCallCFunction(int num_reg_arguments,
- int num_double_registers,
- Register scratch);
- void PrepareCallCFunction(int num_reg_arguments,
- Register scratch);
+ void PrepareCallCFunction(int num_arguments, Register scratch);
// Arguments 1-4 are placed in registers a0 thru a3 respectively.
// Arguments 5..n are stored to stack using following:
@@ -1121,13 +900,7 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments);
+ void CallCFunction(Register function, Register scratch, int num_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
// There are two ways of passing double arguments on MIPS, depending on
@@ -1203,9 +976,6 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
// Number utilities.
@@ -1233,13 +1003,6 @@ class MacroAssembler: public Assembler {
Addu(reg, reg, reg);
}
- // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
- void SmiTagCheckOverflow(Register reg, Register overflow) {
- mov(overflow, reg); // Save original value.
- addu(reg, reg, reg);
- xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
- }
-
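// Sketch of the removed SmiTagCheckOverflow trick: tagging a smi on this
// 32-bit port is just doubling the value, and the doubling overflowed exactly
// when the sign bit flipped, i.e. when (value ^ 2 * value) is negative.
#include <cstdint>

static bool SmiTagCheckOverflow(int32_t value, int32_t* tagged) {
  int32_t doubled = static_cast<int32_t>(
      static_cast<uint32_t>(value) + static_cast<uint32_t>(value));  // addu.
  *tagged = doubled;
  return (value ^ doubled) < 0;  // Sign change means the smi did not fit.
}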
void SmiTag(Register dst, Register src) {
Addu(dst, src, src);
}
@@ -1254,11 +1017,10 @@ class MacroAssembler: public Assembler {
  // Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label,
- Register scratch = at,
- BranchDelaySlot bd = PROTECT) {
+ Register scratch = at) {
ASSERT_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
- Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
+ Branch(smi_label, eq, scratch, Operand(zero_reg));
}
// Jump if the register contains a non-smi.
@@ -1328,29 +1090,13 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* failure);
- void ClampUint8(Register output_reg, Register input_reg);
-
- void ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg);
-
-
void LoadInstanceDescriptors(Register map, Register descriptors);
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- // Patch the relocated value (lui/ori pair).
- void PatchRelocatedValue(Register li_location,
- Register scratch,
- Register new_value);
-
private:
void CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments);
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments);
void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchShort(int16_t offset, Condition cond, Register rs,
@@ -1386,33 +1132,25 @@ class MacroAssembler: public Assembler {
// the function in the 'resolved' flag.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise.
- Label* branch);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ bool UseAbsoluteCodePointers();
+
bool generating_stub_;
bool allow_stub_calls_;
- bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@@ -1453,6 +1191,34 @@ class CodePatcher {
};
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+static MemOperand ContextOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+static inline MemOperand GlobalObjectOperand() {
+ return ContextOperand(cp, Context::GLOBAL_INDEX);
+}
+
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+static inline MemOperand CFunctionArgumentOperand(int index) {
+ ASSERT(index > kCArgSlotCount);
+ // Argument 5 takes the slot just past the four Arg-slots.
+ int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+ return MemOperand(sp, offset);
+}
+
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 9db5c5bed2..63e836f22f 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -377,12 +377,9 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ li(a3, Operand(ExternalReference::isolate_address()));
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(function, argument_count);
- }
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(function, argument_count);
// Restore regexp engine registers.
__ MultiPop(regexp_registers_to_retain);
@@ -610,12 +607,6 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL,
-  // no code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
// Push arguments
// Save callee-save registers.
// Start new stack frame.
@@ -1253,14 +1244,13 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
// Stack is already aligned for call, so decrement by alignment
// to make room for storing the return address.
- __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
- const int return_address_offset = kCArgsSlotsSize;
- __ Addu(a0, sp, return_address_offset);
- __ sw(ra, MemOperand(a0, 0));
+ __ Subu(sp, sp, Operand(stack_alignment));
+ __ sw(ra, MemOperand(sp, 0));
+ __ mov(a0, sp);
__ mov(t9, t1);
__ Call(t9);
- __ lw(ra, MemOperand(sp, return_address_offset));
- __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
+ __ lw(ra, MemOperand(sp, 0));
+ __ Addu(sp, sp, Operand(stack_alignment));
__ Jump(ra);
}
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 4bad0a2ccd..5b949734fb 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -432,13 +432,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, a0);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -451,13 +445,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, a0);
- __ RecordWriteField(scratch,
- offset,
- name_reg,
- receiver_reg,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
}
// Return the value (register v0).
@@ -566,10 +554,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
}
-static MaybeObject* GenerateFastApiDirectCall(
- MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee js function
@@ -608,7 +595,6 @@ static MaybeObject* GenerateFastApiDirectCall(
const int kApiStackSpace = 4;
- FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// NOTE: the O32 abi requires a0 to hold a special pointer when returning a
@@ -640,7 +626,6 @@ static MaybeObject* GenerateFastApiDirectCall(
ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
masm->isolate());
- AllowExternalCallThatCantCauseGC scope(masm);
return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
@@ -819,7 +804,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
miss_label);
// Call a runtime function to load the interceptor property.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
// Save the name_ register across the call.
__ push(name_);
@@ -837,8 +822,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Restore the name_ register.
__ pop(name_);
-
- // Leave the internal frame.
+ __ LeaveInternalFrame();
}
void LoadWithInterceptor(MacroAssembler* masm,
@@ -847,20 +831,19 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JSObject* holder_obj,
Register scratch,
Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- __ Push(holder, name_);
+ __ Push(holder, name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- }
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ __ LeaveInternalFrame();
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
@@ -1273,9 +1256,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
const int kApiStackSpace = 1;
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
-
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object **args_) as the data.
__ sw(a2, MemOperand(sp, kPointerSize));
@@ -1336,43 +1317,41 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ Push(receiver, holder_reg, name_reg);
- } else {
- __ Push(holder_reg, name_reg);
- }
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
- frame_scope.GenerateLeaveFrame();
- __ Ret();
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ Push(receiver, holder_reg, name_reg);
+ } else {
+ __ Push(holder_reg, name_reg);
+ }
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+ __ LeaveInternalFrame();
+ __ Ret();
- // Leave the internal frame.
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
}
+ __ LeaveInternalFrame();
+
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
if (interceptor_holder != lookup->holder()) {
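The interceptor paths above trade the newer RAII-style FrameScope for explicit EnterInternalFrame/LeaveInternalFrame calls, so every exit path (including the early Ret after a successful interceptor result) must tear the frame down by hand. A small sketch of the scope-guard pattern being dropped, using hypothetical stand-in functions rather than the real macro assembler:

    #include <cstdio>

    // Hypothetical stand-ins for the macro-assembler frame operations.
    void EnterInternalFrame() { std::puts("enter frame"); }
    void LeaveInternalFrame() { std::puts("leave frame"); }

    // RAII guard in the spirit of FrameScope: the frame is torn down on every
    // exit of the enclosing block, which the explicit enter/leave pairs
    // restored by this patch must instead handle on each path.
    class FrameGuard {
     public:
      FrameGuard() { EnterInternalFrame(); }
      ~FrameGuard() { LeaveInternalFrame(); }
      FrameGuard(const FrameGuard&) = delete;
      FrameGuard& operator=(const FrameGuard&) = delete;
    };

    int main() {
      FrameGuard guard;  // enter frame
      // ... push arguments, call the runtime ...
    }                    // leave frame runs automatically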
@@ -1601,7 +1580,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements;
+ Label exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into v0 and calculate new length.
__ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1615,51 +1594,29 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check if we could survive without allocation.
__ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
- // Check if value is a smi.
- Label with_write_barrier;
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
- __ JumpIfNotSmi(t0, &with_write_barrier);
-
// Save new length.
__ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
+ __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ Addu(end_elements, end_elements, kEndElementsOffset);
- __ sw(t0, MemOperand(end_elements));
+ __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
+ __ Addu(end_elements, end_elements, kPointerSize);
// Check for a smi.
+ __ JumpIfNotSmi(t0, &with_write_barrier);
+ __ bind(&exit);
__ Drop(argc + 1);
__ Ret();
__ bind(&with_write_barrier);
-
- __ lw(t2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastSmiOnlyElements(t2, t2, &call_builtin);
-
- // Save new length.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Push the element.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(end_elements, elements, end_elements);
- __ Addu(end_elements, end_elements, kEndElementsOffset);
- __ sw(t0, MemOperand(end_elements));
-
- __ RecordWrite(elements,
- end_elements,
- t0,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ InNewSpace(elements, t0, eq, &exit);
+ __ RecordWriteHelper(elements, end_elements, t0);
__ Drop(argc + 1);
__ Ret();
@@ -1671,15 +1628,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Branch(&call_builtin);
}
- __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(a2, &no_fast_elements_check);
- __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(t3, t3, &call_builtin);
- __ bind(&no_fast_elements_check);
-
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(
masm()->isolate());
@@ -1705,7 +1653,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Update new_space_allocation_top.
__ sw(t2, MemOperand(t3));
// Push the argument.
- __ sw(a2, MemOperand(end_elements));
+ __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ sw(t2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
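Both versions of CompileArrayPushCall grow the backing store in place by bumping the new-space allocation top by kAllocationDelta words when the elements array is the most recently allocated object; the reverted code simply reloads the pushed argument from the stack instead of keeping it live across the check. A hedged C++ sketch of the bump-pointer idea, with hypothetical parameter names:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Sketch of the "grow in place" fast path: if the elements backing store
    // ends exactly at the new-space allocation top, the array can be extended
    // by bumping the top pointer instead of reallocating and copying.
    bool TryGrowInPlace(char** allocation_top, char* allocation_limit,
                        char* elements_end, std::size_t extra_bytes) {
      if (elements_end != *allocation_top) return false;  // not the last object
      if (*allocation_top + extra_bytes > allocation_limit) return false;
      *allocation_top += extra_bytes;              // update allocation top
      std::memset(elements_end, 0, extra_bytes);   // fill new slots ("holes")
      return true;
    }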
@@ -2602,12 +2551,7 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
if (V8::UseCrankshaft()) {
- // TODO(kasperl): For now, we always call indirectly through the
- // code field in the function to allow recompilation to take effect
- // without changing any of the call sites.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
+ UNIMPLEMENTED_MIPS();
} else {
__ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
JUMP_FUNCTION, call_kind);
@@ -2774,16 +2718,6 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
__ mov(v0, a0); // Stored value must be returned in v0.
-
- // This trashes a0 but the value is returned in v0 anyway.
- __ RecordWriteField(t0,
- JSGlobalPropertyCell::kValueOffset,
- a0,
- a2,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET);
-
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
__ Ret();
@@ -3182,7 +3116,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic(
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
MapList* receiver_maps,
CodeList* handler_ics) {
// ----------- S t a t e -------------
@@ -3276,10 +3210,9 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
}
-MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
MapList* receiver_maps,
- CodeList* handler_stubs,
- MapList* transitioned_maps) {
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -3292,18 +3225,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
int receiver_count = receiver_maps->length();
__ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- Handle<Map> map(receiver_maps->at(i));
- Handle<Code> code(handler_stubs->at(i));
- if (transitioned_maps->at(i) == NULL) {
- __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
- } else {
- Label next_map;
- __ Branch(&next_map, eq, a3, Operand(map));
- __ li(t0, Operand(Handle<Map>(transitioned_maps->at(i))));
- __ Jump(code, RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ Handle<Code> code(handler_ics->at(current));
+ __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
}
__ bind(&miss);
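The restored CompileStoreMegamorphic emits one map comparison per known receiver map and tail-jumps to the matching handler, falling through to the miss stub; the removed polymorphic variant additionally handled elements-kind transition maps. A minimal C++ sketch of that dispatch shape, with hypothetical Map/Handler types:

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-ins for V8's Map and Code handles.
    struct Map {};
    using Handler = void (*)();

    // Compare the receiver's map against each known map and run the matching
    // handler; fall through to the generic miss stub if nothing matches.
    void DispatchKeyedStore(const Map* receiver_map,
                            const std::vector<const Map*>& maps,
                            const std::vector<Handler>& handlers,
                            Handler miss) {
      for (std::size_t i = 0; i < maps.size(); ++i) {
        if (receiver_map == maps[i]) {
          handlers[i]();   // __ Jump(code, ..., eq, a3, Operand(map))
          return;
        }
      }
      miss();              // __ bind(&miss); jump to the generic stub
    }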
@@ -3532,7 +3457,6 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@@ -3629,7 +3553,6 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3905,6 +3828,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the index is in range.
+ __ SmiUntag(t0, key);
__ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
@@ -3912,6 +3836,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// a3: external array.
+ // t0: key (integer).
if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
// Double to pixel conversion is only implemented in the runtime for now.
@@ -3923,6 +3848,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
// a3: base pointer of external storage.
+ // t0: key (integer).
// t1: value (integer).
switch (elements_kind) {
@@ -3939,36 +3865,33 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ mov(v0, t1); // Value is in range 0..255.
__ bind(&done);
__ mov(t1, v0);
-
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
+ __ addu(t8, a3, t0);
__ sb(t1, MemOperand(t8, 0));
}
break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
+ __ addu(t8, a3, t0);
__ sb(t1, MemOperand(t8, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
__ sh(t1, MemOperand(t8, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
+ __ sll(t8, t0, 2);
__ addu(t8, a3, t8);
__ sw(t1, MemOperand(t8, 0));
break;
case EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
- __ SmiUntag(t0, key);
StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
break;
case EXTERNAL_DOUBLE_ELEMENTS:
- __ sll(t8, key, 2);
+ __ sll(t8, t0, 3);
__ addu(a3, a3, t8);
// a3: effective address of the double element
FloatingPointHelper::Destination destination;
@@ -3990,7 +3913,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3999,11 +3921,12 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
// Entry registers are intact, a0 holds the value which is the return value.
- __ mov(v0, a0);
+ __ mov(v0, value);
__ Ret();
if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
// a3: external array.
+ // t0: index (integer).
__ bind(&check_heap_number);
__ GetObjectType(value, t1, t2);
__ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
@@ -4011,6 +3934,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
// a3: base pointer of external storage.
+ // t0: key (integer).
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
@@ -4023,11 +3947,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(f0, f0);
- __ sll(t8, key, 1);
+ __ sll(t8, t0, 2);
__ addu(t8, a3, t8);
__ swc1(f0, MemOperand(t8, 0));
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, key, 2);
+ __ sll(t8, t0, 3);
__ addu(t8, a3, t8);
__ sdc1(f0, MemOperand(t8, 0));
} else {
@@ -4036,18 +3960,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
+ __ addu(t8, a3, t0);
__ sb(t3, MemOperand(t8, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
__ sh(t3, MemOperand(t8, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
+ __ sll(t8, t0, 2);
__ addu(t8, a3, t8);
__ sw(t3, MemOperand(t8, 0));
break;
@@ -4055,7 +3979,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4066,7 +3989,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// Entry registers are intact, a0 holds the value
// which is the return value.
- __ mov(v0, a0);
+ __ mov(v0, value);
__ Ret();
} else {
// FPU is not available, do manual conversions.
@@ -4121,13 +4044,13 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ or_(t3, t7, t6);
__ bind(&done);
- __ sll(t9, key, 1);
+ __ sll(t9, a1, 2);
__ addu(t9, a2, t9);
__ sw(t3, MemOperand(t9, 0));
// Entry registers are intact, a0 holds the value which is the return
// value.
- __ mov(v0, a0);
+ __ mov(v0, value);
__ Ret();
__ bind(&nan_or_infinity_or_zero);
@@ -4145,7 +4068,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// t8: effective address of destination element.
__ sw(t4, MemOperand(t8, 0));
__ sw(t3, MemOperand(t8, Register::kSizeInBytes));
- __ mov(v0, a0);
__ Ret();
} else {
bool is_signed_type = IsElementTypeSigned(elements_kind);
@@ -4208,18 +4130,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
+ __ addu(t8, a3, t0);
__ sb(t3, MemOperand(t8, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
__ sh(t3, MemOperand(t8, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
+ __ sll(t8, t0, 2);
__ addu(t8, a3, t8);
__ sw(t3, MemOperand(t8, 0));
break;
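The external-array stores above untag the smi key once into t0 and then scale it by the element size (shift by 0, 1, 2 or 3), whereas the removed code scaled the still-tagged key directly. A short C++ sketch of the addressing arithmetic, assuming a one-bit smi tag as on 32-bit targets:

    #include <cstdint>

    // A smi key is a small integer shifted left by the tag size, so it is
    // untagged first and then scaled by the element size of the external
    // (typed) array; element_size_log2 matches the sll shift amounts above
    // (0 for bytes, 1 for shorts, 2 for ints/floats, 3 for doubles).
    constexpr int kSmiTagSize = 1;

    std::uint8_t* ElementAddress(std::uint8_t* base, std::intptr_t smi_key,
                                 int element_size_log2) {
      std::intptr_t index = smi_key >> kSmiTagSize;   // SmiUntag
      return base + (index << element_size_log2);     // sll + addu
    }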
@@ -4227,7 +4149,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4377,10 +4298,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -4389,7 +4308,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// -- a3 : scratch
// -- a4 : scratch (elements)
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic;
Register value_reg = a0;
Register key_reg = a1;
@@ -4423,32 +4342,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Compare smis.
__ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- __ Addu(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch2);
- __ sw(value_reg, MemOperand(scratch));
- } else {
- ASSERT(elements_kind == FAST_ELEMENTS);
- __ Addu(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch2);
- __ sw(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- ASSERT(elements_kind == FAST_ELEMENTS);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- }
+ __ Addu(scratch,
+ elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(scratch3, scratch2, scratch);
+ __ sw(value_reg, MemOperand(scratch3));
+  __ RecordWrite(scratch, Operand(scratch2), receiver_reg, elements_reg);
+
// value_reg (a0) is preserved.
// Done.
__ Ret();
@@ -4457,10 +4358,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
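The restored GenerateStoreFastElement always records the write with the older region-marking RecordWrite, while the removed code distinguished FAST_SMI_ONLY_ELEMENTS (where no barrier is needed) from FAST_ELEMENTS. A hedged sketch of why smi stores can skip the barrier, with hypothetical helper names:

    #include <cstdint>

    // A write barrier is only needed when a heap pointer is written into an
    // object, because the collector must learn about the new old-to-new edge.
    // Smis are immediate values, so a smi-only store may skip the barrier,
    // which is the optimization the removed (newer) code exploited.
    constexpr std::uintptr_t kSmiTagMask = 1;

    inline bool IsSmi(std::uintptr_t value) { return (value & kSmiTagMask) == 0; }

    void StoreElement(std::uintptr_t* slot, std::uintptr_t value,
                      void (*record_write)(std::uintptr_t* slot)) {
      *slot = value;            // __ sw(value_reg, MemOperand(...))
      if (!IsSmi(value)) {
        record_write(slot);     // __ RecordWrite(...)
      }
    }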
@@ -4478,15 +4375,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- t2 : scratch (exponent_reg)
// -- t3 : scratch4
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
Register value_reg = a0;
Register key_reg = a1;
Register receiver_reg = a2;
- Register elements_reg = a3;
- Register scratch1 = t0;
- Register scratch2 = t1;
- Register scratch3 = t2;
+ Register scratch = a3;
+ Register elements_reg = t0;
+ Register mantissa_reg = t1;
+ Register exponent_reg = t2;
Register scratch4 = t3;
// This stub is meant to be tail-jumped to, the receiver must already
@@ -4498,25 +4395,90 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Check that the key is within bounds.
if (is_js_array) {
- __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
- __ lw(scratch1,
+ __ lw(scratch,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
-
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- receiver_reg,
- elements_reg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- &transition_elements_kind);
+ __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+ // Handle smi values specially.
+ __ JumpIfSmi(value_reg, &smi_value);
+
+ // Ensure that the object is a heap number
+ __ CheckMap(value_reg,
+ scratch,
+ masm->isolate()->factory()->heap_number_map(),
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
+ // in the exponent.
+ __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+ __ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+ __ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch));
+
+ __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+ __ bind(&have_double_value);
+ __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ __ Addu(scratch, elements_reg, Operand(scratch4));
+ __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ sw(exponent_reg, FieldMemOperand(scratch, offset));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, value_reg); // In delay slot.
+
+ __ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+ __ Branch(&is_nan, gt, exponent_reg, Operand(scratch));
+ __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+ __ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+
+ __ bind(&is_nan);
+ // Load canonical NaN for storing into the double array.
+ uint64_t nan_int64 = BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+ __ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ __ jmp(&have_double_value);
+
+ __ bind(&smi_value);
+ __ Addu(scratch, elements_reg,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ __ Addu(scratch, scratch, scratch4);
+ // scratch is now effective address of the double element
+
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(FPU)) {
+ destination = FloatingPointHelper::kFPURegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+ Register untagged_value = receiver_reg;
+ __ SmiUntag(untagged_value, value_reg);
+ FloatingPointHelper::ConvertIntToDouble(
+ masm,
+ untagged_value,
+ destination,
+ f0,
+ mantissa_reg,
+ exponent_reg,
+ scratch4,
+ f2);
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ __ sdc1(f0, MemOperand(scratch, 0));
+ } else {
+ __ sw(mantissa_reg, MemOperand(scratch, 0));
+ __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+ }
__ Ret(USE_DELAY_SLOT);
__ mov(v0, value_reg); // In delay slot.
@@ -4525,10 +4487,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
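The restored double-element store inspects the upper word of the heap number: values at or above kNaNOrInfinityLowerBoundUpper32 are NaN or Infinity, and NaNs are replaced by the canonical NaN so they cannot collide with the hole-NaN pattern. A C++ sketch mirroring that positive-NaN check, with the constant taken from the code above:

    #include <cstdint>

    // Mirrors the stub's signed comparisons on the upper 32 bits of the
    // IEEE-754 double: greater than the bound means NaN; equal to the bound
    // means NaN only if the low fraction word is non-zero (else +Infinity).
    bool IsNaNToCanonicalize(std::int32_t upper, std::uint32_t lower) {
      const std::int32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
      if (upper < kNaNOrInfinityLowerBoundUpper32) return false;  // ordinary value
      if (upper > kNaNOrInfinityLowerBoundUpper32) return true;   // NaN
      return lower != 0;  // exponent boundary: NaN unless the fraction is zero
    }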
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index 7a3fd090d1..a791dbba28 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -312,8 +312,7 @@ int main(int argc, char** argv) {
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
- // TODO(gc): request full compaction?
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
context.Dispose();
CppByteSink sink(argv[1]);
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 6d2cf5f72c..8de7162ab2 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -94,9 +94,6 @@ void HeapObject::HeapObjectVerify() {
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayVerify();
break;
- case FREE_SPACE_TYPE:
- FreeSpace::cast(this)->FreeSpaceVerify();
- break;
case EXTERNAL_PIXEL_ARRAY_TYPE:
ExternalPixelArray::cast(this)->ExternalPixelArrayVerify();
break;
@@ -210,11 +207,6 @@ void ByteArray::ByteArrayVerify() {
}
-void FreeSpace::FreeSpaceVerify() {
- ASSERT(IsFreeSpace());
-}
-
-
void ExternalPixelArray::ExternalPixelArrayVerify() {
ASSERT(IsExternalPixelArray());
}
@@ -268,7 +260,7 @@ void JSObject::JSObjectVerify() {
(map()->inobject_properties() + properties()->length() -
map()->NextFreePropertyIndex()));
}
- ASSERT_EQ((map()->has_fast_elements() || map()->has_fast_smi_only_elements()),
+ ASSERT_EQ(map()->has_fast_elements(),
(elements()->map() == GetHeap()->fixed_array_map() ||
elements()->map() == GetHeap()->fixed_cow_array_map()));
ASSERT(map()->has_fast_elements() == HasFastElements());
@@ -330,8 +322,7 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
double value = get_scalar(i);
ASSERT(!isnan(value) ||
(BitCast<uint64_t>(value) ==
- BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
- ((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
+ BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())));
}
}
}
@@ -396,7 +387,6 @@ void JSFunction::JSFunctionVerify() {
CHECK(IsJSFunction());
VerifyObjectField(kPrototypeOrInitialMapOffset);
VerifyObjectField(kNextFunctionLinkOffset);
- CHECK(code()->IsCode());
CHECK(next_function_link()->IsUndefined() ||
next_function_link()->IsJSFunction());
}
@@ -456,8 +446,9 @@ void Oddball::OddballVerify() {
} else {
ASSERT(number->IsSmi());
int value = Smi::cast(number)->value();
- ASSERT(value <= 1);
// Hidden oddballs have negative smis.
+ const int kLeastHiddenOddballNumber = -4;
+ ASSERT(value <= 1);
ASSERT(value >= kLeastHiddenOddballNumber);
}
}
@@ -472,7 +463,6 @@ void JSGlobalPropertyCell::JSGlobalPropertyCellVerify() {
void Code::CodeVerify() {
CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
kCodeAlignment));
- relocation_info()->Verify();
Address last_gc_pc = NULL;
for (RelocIterator it(this); !it.done(); it.next()) {
it.rinfo()->Verify();
@@ -498,7 +488,7 @@ void JSWeakMap::JSWeakMapVerify() {
CHECK(IsJSWeakMap());
JSObjectVerify();
VerifyHeapPointer(table());
- ASSERT(table()->IsHashTable() || table()->IsUndefined());
+ ASSERT(table()->IsHashTable());
}
@@ -545,14 +535,13 @@ void JSRegExp::JSRegExpVerify() {
void JSProxy::JSProxyVerify() {
- CHECK(IsJSProxy());
+ ASSERT(IsJSProxy());
VerifyPointer(handler());
- ASSERT(hash()->IsSmi() || hash()->IsUndefined());
}
void JSFunctionProxy::JSFunctionProxyVerify() {
- CHECK(IsJSFunctionProxy());
+ ASSERT(IsJSFunctionProxy());
JSProxyVerify();
VerifyPointer(call_trap());
VerifyPointer(construct_trap());
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index cebf9be074..8796865c29 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -43,11 +43,8 @@
#include "isolate.h"
#include "property.h"
#include "spaces.h"
-#include "store-buffer.h"
#include "v8memory.h"
-#include "incremental-marking.h"
-
namespace v8 {
namespace internal {
@@ -83,7 +80,16 @@ PropertyDetails PropertyDetails::AsDeleted() {
type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
void holder::set_##name(type* value, WriteBarrierMode mode) { \
WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode); \
+ }
+
+
+// GC-safe accessors do not use HeapObject::GetHeap(), but access TLS instead.
+#define ACCESSORS_GCSAFE(holder, name, type, offset) \
+ type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
+ void holder::set_##name(type* value, WriteBarrierMode mode) { \
+ WRITE_FIELD(this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(HEAP, this, offset, mode); \
}
@@ -141,12 +147,6 @@ bool Object::IsHeapObject() {
}
-bool Object::NonFailureIsHeapObject() {
- ASSERT(!this->IsFailure());
- return (reinterpret_cast<intptr_t>(this) & kSmiTagMask) != 0;
-}
-
-
bool Object::IsHeapNumber() {
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() == HEAP_NUMBER_TYPE;
@@ -165,13 +165,6 @@ bool Object::IsSpecObject() {
}
-bool Object::IsSpecFunction() {
- if (!Object::IsHeapObject()) return false;
- InstanceType type = HeapObject::cast(this)->map()->instance_type();
- return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
-}
-
-
bool Object::IsSymbol() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
@@ -409,19 +402,6 @@ bool Object::IsByteArray() {
}
-bool Object::IsFreeSpace() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == FREE_SPACE_TYPE;
-}
-
-
-bool Object::IsFiller() {
- if (!Object::IsHeapObject()) return false;
- InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
- return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
-}
-
-
bool Object::IsExternalPixelArray() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() ==
@@ -529,23 +509,20 @@ Failure* Failure::cast(MaybeObject* obj) {
bool Object::IsJSReceiver() {
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
return IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
}
bool Object::IsJSObject() {
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- return IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
+ return IsJSReceiver() && !IsJSProxy();
}
bool Object::IsJSProxy() {
- if (!Object::IsHeapObject()) return false;
- InstanceType type = HeapObject::cast(this)->map()->instance_type();
- return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
+ return Object::IsHeapObject() &&
+ (HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE ||
+ HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE);
}
@@ -665,6 +642,7 @@ bool Object::IsCode() {
bool Object::IsOddball() {
+ ASSERT(HEAP->is_safe_to_read_maps());
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE;
}
@@ -961,20 +939,21 @@ MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) {
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
-#define WRITE_BARRIER(heap, object, offset, value) \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- if (heap->InNewSpace(value)) { \
- heap->RecordWrite(object->address(), offset); \
- }
-
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
- if (mode == UPDATE_WRITE_BARRIER) { \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- if (heap->InNewSpace(value)) { \
- heap->RecordWrite(object->address(), offset); \
- } \
+// TODO(isolates): Pass heap in to these macros.
+#define WRITE_BARRIER(object, offset) \
+ object->GetHeap()->RecordWrite(object->address(), offset);
+
+// CONDITIONAL_WRITE_BARRIER must be issued after the actual
+// write due to the assert validating the written value.
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, mode) \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ heap->RecordWrite(object->address(), offset); \
+ } else { \
+ ASSERT(mode == SKIP_WRITE_BARRIER); \
+ ASSERT(heap->InNewSpace(object) || \
+ !heap->InNewSpace(READ_FIELD(object, offset)) || \
+ Page::FromAddress(object->address())-> \
+ IsRegionDirty(object->address() + offset)); \
}
#ifndef V8_TARGET_ARCH_MIPS
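The macro rewrite above swaps the incremental-marking write barrier for the older region (card) marking barrier: each recorded write simply marks the dirty region of the page containing the written slot, and the SKIP_WRITE_BARRIER branch asserts the region is already dirty or the store is safe. A hypothetical C++ sketch of region marking, with assumed page and region sizes:

    #include <bitset>
    #include <cstddef>
    #include <cstdint>

    constexpr std::uintptr_t kPageSize       = 1u << 13;  // assumption: 8 KB pages
    constexpr std::uintptr_t kRegionSizeLog2 = 8;         // assumption: 256-byte regions
    constexpr std::size_t    kRegionsPerPage = kPageSize >> kRegionSizeLog2;

    struct Page {
      std::bitset<kRegionsPerPage> dirty_regions;  // one dirty bit per region
    };

    // Card/region-marking barrier in the spirit of the restored
    // Heap::RecordWrite(address, offset): mark the region holding the slot.
    void RecordWrite(Page* page, std::uintptr_t object_address, int offset) {
      std::uintptr_t slot_in_page = (object_address + offset) & (kPageSize - 1);
      page->dirty_regions.set(slot_in_page >> kRegionSizeLog2);
    }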
@@ -995,6 +974,7 @@ MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) {
#define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
#endif // V8_TARGET_ARCH_MIPS
+
#ifndef V8_TARGET_ARCH_MIPS
#define WRITE_DOUBLE_FIELD(p, offset, value) \
(*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
@@ -1189,6 +1169,91 @@ HeapObject* MapWord::ToForwardingAddress() {
}
+bool MapWord::IsMarked() {
+ return (value_ & kMarkingMask) == 0;
+}
+
+
+void MapWord::SetMark() {
+ value_ &= ~kMarkingMask;
+}
+
+
+void MapWord::ClearMark() {
+ value_ |= kMarkingMask;
+}
+
+
+bool MapWord::IsOverflowed() {
+ return (value_ & kOverflowMask) != 0;
+}
+
+
+void MapWord::SetOverflow() {
+ value_ |= kOverflowMask;
+}
+
+
+void MapWord::ClearOverflow() {
+ value_ &= ~kOverflowMask;
+}
+
+
+MapWord MapWord::EncodeAddress(Address map_address, int offset) {
+ // Offset is the distance in live bytes from the first live object in the
+ // same page. The offset between two objects in the same page should not
+ // exceed the object area size of a page.
+ ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
+
+ uintptr_t compact_offset = offset >> kObjectAlignmentBits;
+ ASSERT(compact_offset < (1 << kForwardingOffsetBits));
+
+ Page* map_page = Page::FromAddress(map_address);
+ ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
+
+ uintptr_t map_page_offset =
+ map_page->Offset(map_address) >> kMapAlignmentBits;
+
+ uintptr_t encoding =
+ (compact_offset << kForwardingOffsetShift) |
+ (map_page_offset << kMapPageOffsetShift) |
+ (map_page->mc_page_index << kMapPageIndexShift);
+ return MapWord(encoding);
+}
+
+
+Address MapWord::DecodeMapAddress(MapSpace* map_space) {
+ int map_page_index =
+ static_cast<int>((value_ & kMapPageIndexMask) >> kMapPageIndexShift);
+ ASSERT_MAP_PAGE_INDEX(map_page_index);
+
+ int map_page_offset = static_cast<int>(
+ ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
+ kMapAlignmentBits);
+
+ return (map_space->PageAddress(map_page_index) + map_page_offset);
+}
+
+
+int MapWord::DecodeOffset() {
+ // The offset field is represented in the kForwardingOffsetBits
+ // most-significant bits.
+ uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
+ ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize));
+ return static_cast<int>(offset);
+}
+
+
+MapWord MapWord::FromEncodedAddress(Address address) {
+ return MapWord(reinterpret_cast<uintptr_t>(address));
+}
+
+
+Address MapWord::ToEncodedAddress() {
+ return reinterpret_cast<Address>(value_);
+}
+
+
#ifdef DEBUG
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
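The restored MapWord::EncodeAddress packs three fields into one word during mark-compact: the live-byte offset within the object's page (compacted by the object alignment), the map's offset within its page, and the map page index. A sketch of that packing with assumed field widths; the real shifts and masks come from V8's Page layout constants:

    #include <cassert>
    #include <cstdint>

    constexpr int kObjectAlignmentBits = 2;   // assumption: 4-byte alignment
    constexpr int kMapAlignmentBits    = 2;   // assumption
    constexpr int kMapPageIndexBits    = 10;  // assumption
    constexpr int kMapPageOffsetBits   = 10;  // assumption

    std::uintptr_t EncodeMapWord(std::uintptr_t live_offset,
                                 std::uintptr_t map_page_offset,
                                 std::uintptr_t map_page_index) {
      std::uintptr_t compact_offset  = live_offset >> kObjectAlignmentBits;
      std::uintptr_t compact_map_off = map_page_offset >> kMapAlignmentBits;
      assert(map_page_index < (1u << kMapPageIndexBits));
      assert(compact_map_off < (1u << kMapPageOffsetBits));
      return (compact_offset << (kMapPageIndexBits + kMapPageOffsetBits)) |
             (compact_map_off << kMapPageIndexBits) |
             map_page_index;
    }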
@@ -1201,11 +1266,12 @@ void HeapObject::VerifySmiField(int offset) {
Heap* HeapObject::GetHeap() {
- Heap* heap =
- MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
- ASSERT(heap != NULL);
- ASSERT(heap->isolate() == Isolate::Current());
- return heap;
+ // During GC, the map pointer in HeapObject is used in various ways that
+ // prevent us from retrieving Heap from the map.
+ // Assert that we are not in GC, implement GC code in a way that it doesn't
+ // pull heap from the map.
+ ASSERT(HEAP->is_safe_to_read_maps());
+ return map()->heap();
}
@@ -1221,17 +1287,6 @@ Map* HeapObject::map() {
void HeapObject::set_map(Map* value) {
set_map_word(MapWord::FromMap(value));
- if (value != NULL) {
- // TODO(1600) We are passing NULL as a slot because maps can never be on
- // evacuation candidate.
- value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
- }
-}
-
-
-// Unsafe accessor omitting write barrier.
-void HeapObject::set_map_unsafe(Map* value) {
- set_map_word(MapWord::FromMap(value));
}
@@ -1274,6 +1329,47 @@ void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
}
+bool HeapObject::IsMarked() {
+ return map_word().IsMarked();
+}
+
+
+void HeapObject::SetMark() {
+ ASSERT(!IsMarked());
+ MapWord first_word = map_word();
+ first_word.SetMark();
+ set_map_word(first_word);
+}
+
+
+void HeapObject::ClearMark() {
+ ASSERT(IsMarked());
+ MapWord first_word = map_word();
+ first_word.ClearMark();
+ set_map_word(first_word);
+}
+
+
+bool HeapObject::IsOverflowed() {
+ return map_word().IsOverflowed();
+}
+
+
+void HeapObject::SetOverflow() {
+ MapWord first_word = map_word();
+ first_word.SetOverflow();
+ set_map_word(first_word);
+}
+
+
+void HeapObject::ClearOverflow() {
+ ASSERT(IsOverflowed());
+ MapWord first_word = map_word();
+ first_word.ClearOverflow();
+ set_map_word(first_word);
+}
+
+
double HeapNumber::value() {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
@@ -1304,77 +1400,16 @@ FixedArrayBase* JSObject::elements() {
return static_cast<FixedArrayBase*>(array);
}
-void JSObject::ValidateSmiOnlyElements() {
-#if DEBUG
- if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
- Heap* heap = GetHeap();
- // Don't use elements, since integrity checks will fail if there
- // are filler pointers in the array.
- FixedArray* fixed_array =
- reinterpret_cast<FixedArray*>(READ_FIELD(this, kElementsOffset));
- Map* map = fixed_array->map();
- // Arrays that have been shifted in place can't be verified.
- if (map != heap->raw_unchecked_one_pointer_filler_map() &&
- map != heap->raw_unchecked_two_pointer_filler_map() &&
- map != heap->free_space_map()) {
- for (int i = 0; i < fixed_array->length(); i++) {
- Object* current = fixed_array->get(i);
- ASSERT(current->IsSmi() || current == heap->the_hole_value());
- }
- }
- }
-#endif
-}
-
-
-MaybeObject* JSObject::EnsureCanContainNonSmiElements() {
-#if DEBUG
- ValidateSmiOnlyElements();
-#endif
- if ((map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS)) {
- Object* obj;
- MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- set_map(Map::cast(obj));
- }
- return this;
-}
-
-
-MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
- uint32_t count) {
- if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
- for (uint32_t i = 0; i < count; ++i) {
- Object* current = *objects++;
- if (!current->IsSmi() && current != GetHeap()->the_hole_value()) {
- return EnsureCanContainNonSmiElements();
- }
- }
- }
- return this;
-}
-
-
-MaybeObject* JSObject::EnsureCanContainElements(FixedArray* elements) {
- Object** objects = reinterpret_cast<Object**>(
- FIELD_ADDR(elements, elements->OffsetOfElementAt(0)));
- return EnsureCanContainElements(objects, elements->length());
-}
-
void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
- ASSERT((map()->has_fast_elements() ||
- map()->has_fast_smi_only_elements()) ==
+ ASSERT(map()->has_fast_elements() ==
(value->map() == GetHeap()->fixed_array_map() ||
value->map() == GetHeap()->fixed_cow_array_map()));
ASSERT(map()->has_fast_double_elements() ==
value->IsFixedDoubleArray());
ASSERT(value->HasValidElements());
-#ifdef DEBUG
- ValidateSmiOnlyElements();
-#endif
WRITE_FIELD(this, kElementsOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
}
@@ -1385,7 +1420,7 @@ void JSObject::initialize_properties() {
void JSObject::initialize_elements() {
- ASSERT(map()->has_fast_elements() || map()->has_fast_smi_only_elements());
+ ASSERT(map()->has_fast_elements());
ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
}
@@ -1393,11 +1428,9 @@ void JSObject::initialize_elements() {
MaybeObject* JSObject::ResetElements() {
Object* obj;
- ElementsKind elements_kind = FLAG_smi_only_arrays
- ? FAST_SMI_ONLY_ELEMENTS
- : FAST_ELEMENTS;
- MaybeObject* maybe_obj = GetElementsTransitionMap(elements_kind);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ { MaybeObject* maybe_obj = map()->GetFastElementsMap();
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
set_map(Map::cast(obj));
initialize_elements();
return this;
@@ -1409,12 +1442,12 @@ ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
byte Oddball::kind() {
- return Smi::cast(READ_FIELD(this, kKindOffset))->value();
+ return READ_BYTE_FIELD(this, kKindOffset);
}
void Oddball::set_kind(byte value) {
- WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
+ WRITE_BYTE_FIELD(this, kKindOffset, value);
}
@@ -1427,8 +1460,6 @@ void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
// The write barrier is not used for global property cells.
ASSERT(!val->IsJSGlobalPropertyCell());
WRITE_FIELD(this, kValueOffset, val);
- GetHeap()->incremental_marking()->RecordWrite(
- this, HeapObject::RawField(this, kValueOffset), val);
}
@@ -1497,7 +1528,7 @@ void JSObject::SetInternalField(int index, Object* value) {
// to adjust the index here.
int offset = GetHeaderSize() + (kPointerSize * index);
WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
+ WRITE_BARRIER(this, offset);
}
@@ -1523,7 +1554,7 @@ Object* JSObject::FastPropertyAtPut(int index, Object* value) {
if (index < 0) {
int offset = map()->instance_size() + (index * kPointerSize);
WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
+ WRITE_BARRIER(this, offset);
} else {
ASSERT(index < properties()->length());
properties()->set(index, value);
@@ -1557,32 +1588,16 @@ Object* JSObject::InObjectPropertyAtPut(int index,
ASSERT(index < 0);
int offset = map()->instance_size() + (index * kPointerSize);
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
return value;
}
-void JSObject::InitializeBody(Map* map,
- Object* pre_allocated_value,
- Object* filler_value) {
- ASSERT(!filler_value->IsHeapObject() ||
- !GetHeap()->InNewSpace(filler_value));
- ASSERT(!pre_allocated_value->IsHeapObject() ||
- !GetHeap()->InNewSpace(pre_allocated_value));
- int size = map->instance_size();
- int offset = kHeaderSize;
- if (filler_value != pre_allocated_value) {
- int pre_allocated = map->pre_allocated_property_fields();
- ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size);
- for (int i = 0; i < pre_allocated; i++) {
- WRITE_FIELD(this, offset, pre_allocated_value);
- offset += kPointerSize;
- }
- }
- while (offset < size) {
- WRITE_FIELD(this, offset, filler_value);
- offset += kPointerSize;
+void JSObject::InitializeBody(int object_size, Object* value) {
+ ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
+ for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
+ WRITE_FIELD(this, offset, value);
}
}
@@ -1668,7 +1683,7 @@ void FixedArray::set(int index, Object* value) {
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
+ WRITE_BARRIER(this, offset);
}
@@ -1740,7 +1755,11 @@ void FixedDoubleArray::Initialize(FixedDoubleArray* from) {
old_length * kDoubleSize);
} else {
for (int i = 0; i < old_length; ++i) {
- set(i, from->get_scalar(i));
+ if (from->is_the_hole(i)) {
+ set_the_hole(i);
+ } else {
+ set(i, from->get_scalar(i));
+ }
}
}
int offset = kHeaderSize + old_length * kDoubleSize;
@@ -1753,7 +1772,7 @@ void FixedDoubleArray::Initialize(FixedDoubleArray* from) {
void FixedDoubleArray::Initialize(FixedArray* from) {
int old_length = from->length();
- ASSERT(old_length <= length());
+ ASSERT(old_length < length());
for (int i = 0; i < old_length; i++) {
Object* hole_or_object = from->get(i);
if (hole_or_object->IsTheHole()) {
@@ -1787,9 +1806,7 @@ void FixedDoubleArray::Initialize(NumberDictionary* from) {
WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
- Heap* heap = GetHeap();
- if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
- if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
+ if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER;
return UPDATE_WRITE_BARRIER;
}
@@ -1801,7 +1818,7 @@ void FixedArray::set(int index,
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
}
@@ -1810,10 +1827,6 @@ void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
ASSERT(index >= 0 && index < array->length());
ASSERT(!HEAP->InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
- array->GetHeap()->incremental_marking()->RecordWrite(
- array,
- HeapObject::RawField(array, kHeaderSize + index * kPointerSize),
- value);
}
@@ -1866,7 +1879,7 @@ void FixedArray::set_unchecked(Heap* heap,
WriteBarrierMode mode) {
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(heap, this, offset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(heap, this, offset, mode);
}
@@ -2145,7 +2158,6 @@ CAST_ACCESSOR(JSFunctionProxy)
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(ByteArray)
-CAST_ACCESSOR(FreeSpace)
CAST_ACCESSOR(ExternalArray)
CAST_ACCESSOR(ExternalByteArray)
CAST_ACCESSOR(ExternalUnsignedByteArray)
@@ -2172,7 +2184,6 @@ HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
-SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
SMI_ACCESSORS(String, length, kLengthOffset)
@@ -2329,7 +2340,7 @@ String* SlicedString::parent() {
void SlicedString::set_parent(String* parent) {
- ASSERT(parent->IsSeqString() || parent->IsExternalString());
+ ASSERT(parent->IsSeqString());
WRITE_FIELD(this, kParentOffset, parent);
}
@@ -2349,7 +2360,7 @@ Object* ConsString::unchecked_first() {
void ConsString::set_first(String* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, mode);
}
@@ -2365,31 +2376,29 @@ Object* ConsString::unchecked_second() {
void ConsString::set_second(String* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, mode);
}
-const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
+ExternalAsciiString::Resource* ExternalAsciiString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
void ExternalAsciiString::set_resource(
- const ExternalAsciiString::Resource* resource) {
- *reinterpret_cast<const Resource**>(
- FIELD_ADDR(this, kResourceOffset)) = resource;
+ ExternalAsciiString::Resource* resource) {
+ *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
}
-const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
+ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
void ExternalTwoByteString::set_resource(
- const ExternalTwoByteString::Resource* resource) {
- *reinterpret_cast<const Resource**>(
- FIELD_ADDR(this, kResourceOffset)) = resource;
+ ExternalTwoByteString::Resource* resource) {
+ *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
}
@@ -2689,9 +2698,6 @@ int HeapObject::SizeFromMap(Map* map) {
if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
}
- if (instance_type == FREE_SPACE_TYPE) {
- return reinterpret_cast<FreeSpace*>(this)->size();
- }
if (instance_type == STRING_TYPE) {
return SeqTwoByteString::SizeFor(
reinterpret_cast<SeqTwoByteString*>(this)->length());
@@ -2853,6 +2859,12 @@ JSFunction* Map::unchecked_constructor() {
}
+FixedArray* Map::unchecked_prototype_transitions() {
+ return reinterpret_cast<FixedArray*>(
+ READ_FIELD(this, kPrototypeTransitionsOffset));
+}
+
+
Code::Flags Code::flags() {
return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
}
@@ -2924,19 +2936,6 @@ void Code::set_major_key(int major) {
}
-bool Code::is_pregenerated() {
- return kind() == STUB && IsPregeneratedField::decode(flags());
-}
-
-
-void Code::set_is_pregenerated(bool value) {
- ASSERT(kind() == STUB);
- Flags f = flags();
- f = static_cast<Flags>(IsPregeneratedField::update(f, value));
- set_flags(f);
-}
-
-
bool Code::optimizable() {
ASSERT(kind() == FUNCTION);
return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
@@ -3102,19 +3101,6 @@ void Code::set_to_boolean_state(byte value) {
WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value);
}
-
-bool Code::has_function_cache() {
- ASSERT(kind() == STUB);
- return READ_BYTE_FIELD(this, kHasFunctionCacheOffset) != 0;
-}
-
-
-void Code::set_has_function_cache(bool flag) {
- ASSERT(kind() == STUB);
- WRITE_BYTE_FIELD(this, kHasFunctionCacheOffset, flag);
-}
-
-
bool Code::is_inline_cache_stub() {
Kind kind = this->kind();
return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
@@ -3200,6 +3186,48 @@ Code* Code::GetCodeFromTargetAddress(Address address) {
}
+Isolate* Map::isolate() {
+ return heap()->isolate();
+}
+
+
+Heap* Map::heap() {
+ // NOTE: address() helper is not used to save one instruction.
+ Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+ ASSERT(heap != NULL);
+ ASSERT(heap->isolate() == Isolate::Current());
+ return heap;
+}
+
+
+Heap* Code::heap() {
+ // NOTE: address() helper is not used to save one instruction.
+ Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+ ASSERT(heap != NULL);
+ ASSERT(heap->isolate() == Isolate::Current());
+ return heap;
+}
+
+
+Isolate* Code::isolate() {
+ return heap()->isolate();
+}
+
+
+Heap* JSGlobalPropertyCell::heap() {
+ // NOTE: address() helper is not used to save one instruction.
+ Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+ ASSERT(heap != NULL);
+ ASSERT(heap->isolate() == Isolate::Current());
+ return heap;
+}
+
+
+Isolate* JSGlobalPropertyCell::isolate() {
+ return heap()->isolate();
+}
+
+
Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
return HeapObject::
FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
@@ -3214,7 +3242,46 @@ Object* Map::prototype() {
void Map::set_prototype(Object* value, WriteBarrierMode mode) {
ASSERT(value->IsNull() || value->IsJSReceiver());
WRITE_FIELD(this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode);
+}
+
+
+MaybeObject* Map::GetFastElementsMap() {
+ if (has_fast_elements()) return this;
+ Object* obj;
+ { MaybeObject* maybe_obj = CopyDropTransitions();
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ Map* new_map = Map::cast(obj);
+ new_map->set_elements_kind(FAST_ELEMENTS);
+ isolate()->counters()->map_to_fast_elements()->Increment();
+ return new_map;
+}
+
+
+MaybeObject* Map::GetFastDoubleElementsMap() {
+ if (has_fast_double_elements()) return this;
+ Object* obj;
+ { MaybeObject* maybe_obj = CopyDropTransitions();
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ Map* new_map = Map::cast(obj);
+ new_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
+ isolate()->counters()->map_to_fast_double_elements()->Increment();
+ return new_map;
+}
+
+
+MaybeObject* Map::GetSlowElementsMap() {
+ if (!has_fast_elements() && !has_fast_double_elements()) return this;
+ Object* obj;
+ { MaybeObject* maybe_obj = CopyDropTransitions();
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ Map* new_map = Map::cast(obj);
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ isolate()->counters()->map_to_slow_elements()->Increment();
+ return new_map;
}
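The restored GetFastElementsMap, GetFastDoubleElementsMap and GetSlowElementsMap each copy the current map (dropping transitions) and retag only its elements kind; the removed API routed this through GetElementsTransitionMap with FAST_SMI_ONLY_ELEMENTS support. A minimal sketch of the copy-and-retag idea, with hypothetical types standing in for V8's handles:

    #include <memory>

    enum ElementsKind { FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS, DICTIONARY_ELEMENTS };

    struct Map {
      ElementsKind elements_kind;
      // ... descriptors, bit fields, etc. elided ...
    };

    // Returns nullptr when the map already has the target kind (caller keeps
    // the current map), otherwise a copy with only the elements kind changed.
    std::unique_ptr<Map> GetElementsMap(const Map& current, ElementsKind target) {
      if (current.elements_kind == target) return nullptr;
      auto copy = std::make_unique<Map>(current);  // CopyDropTransitions()
      copy->elements_kind = target;                // set_elements_kind(...)
      return copy;
    }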
@@ -3249,8 +3316,7 @@ void Map::set_instance_descriptors(DescriptorArray* value,
WriteBarrierMode mode) {
Object* object = READ_FIELD(this,
kInstanceDescriptorsOrBitField3Offset);
- Heap* heap = GetHeap();
- if (value == heap->empty_descriptor_array()) {
+ if (value == isolate()->heap()->empty_descriptor_array()) {
clear_instance_descriptors();
return;
} else {
@@ -3263,8 +3329,10 @@ void Map::set_instance_descriptors(DescriptorArray* value,
}
ASSERT(!is_shared());
WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(),
+ this,
+ kInstanceDescriptorsOrBitField3Offset,
+ mode);
}
@@ -3293,22 +3361,14 @@ void Map::set_bit_field3(int value) {
}
-FixedArray* Map::unchecked_prototype_transitions() {
- return reinterpret_cast<FixedArray*>(
- READ_FIELD(this, kPrototypeTransitionsOffset));
-}
-
-
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
ACCESSORS(Map, constructor, Object, kConstructorOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
-ACCESSORS(JSFunction,
- next_function_link,
- Object,
- kNextFunctionLinkOffset)
+ACCESSORS_GCSAFE(JSFunction, next_function_link, Object,
+ kNextFunctionLinkOffset)
ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
@@ -3397,8 +3457,8 @@ ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
#endif
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
+ACCESSORS_GCSAFE(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS_GCSAFE(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -3604,7 +3664,7 @@ Code* SharedFunctionInfo::unchecked_code() {
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kCodeOffset, value);
- CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
+ ASSERT(!Isolate::Current()->heap()->InNewSpace(value));
}
@@ -3617,11 +3677,7 @@ SerializedScopeInfo* SharedFunctionInfo::scope_info() {
void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
WriteBarrierMode mode) {
WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
- CONDITIONAL_WRITE_BARRIER(GetHeap(),
- this,
- kScopeInfoOffset,
- reinterpret_cast<Object*>(value),
- mode);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset, mode);
}
@@ -3718,13 +3774,10 @@ Code* JSFunction::unchecked_code() {
void JSFunction::set_code(Code* value) {
+ // Skip the write barrier because code is never in new space.
ASSERT(!HEAP->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
- GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
- this,
- HeapObject::RawField(this, kCodeEntryOffset),
- value);
}
@@ -3764,7 +3817,7 @@ SharedFunctionInfo* JSFunction::unchecked_shared() {
void JSFunction::set_context(Object* value) {
ASSERT(value->IsUndefined() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
- WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
+ WRITE_BARRIER(this, kContextOffset);
}
ACCESSORS(JSFunction, prototype_or_initial_map, Object,
@@ -3838,7 +3891,7 @@ void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
Object* value) {
ASSERT(id < kJSBuiltinsCount); // id is unsigned.
WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
- WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value);
+ WRITE_BARRIER(this, OffsetOfFunctionWithId(id));
}
@@ -3857,7 +3910,6 @@ void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
-ACCESSORS(JSProxy, hash, Object, kHashOffset)
ACCESSORS(JSFunctionProxy, call_trap, Object, kCallTrapOffset)
ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset)
@@ -3870,8 +3922,8 @@ void JSProxy::InitializeBody(int object_size, Object* value) {
}
-ACCESSORS(JSWeakMap, table, Object, kTableOffset)
-ACCESSORS(JSWeakMap, next, Object, kNextOffset)
+ACCESSORS(JSWeakMap, table, ObjectHashTable, kTableOffset)
+ACCESSORS_GCSAFE(JSWeakMap, next, Object, kNextOffset)
ObjectHashTable* JSWeakMap::unchecked_table() {
@@ -3963,8 +4015,9 @@ byte* Code::entry() {
}
-bool Code::contains(byte* inner_pointer) {
- return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
+bool Code::contains(byte* pc) {
+ return (instruction_start() <= pc) &&
+ (pc <= instruction_start() + instruction_size());
}
@@ -4043,7 +4096,6 @@ void JSRegExp::SetDataAtUnchecked(int index, Object* value, Heap* heap) {
if (value->IsSmi()) {
fa->set_unchecked(index, Smi::cast(value));
} else {
- // We only do this during GC, so we don't need to notify the write barrier.
fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER);
}
}
@@ -4051,20 +4103,15 @@ void JSRegExp::SetDataAtUnchecked(int index, Object* value, Heap* heap) {
ElementsKind JSObject::GetElementsKind() {
ElementsKind kind = map()->elements_kind();
-#if DEBUG
- FixedArrayBase* fixed_array =
- reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
- Map* map = fixed_array->map();
- ASSERT(((kind == FAST_ELEMENTS || kind == FAST_SMI_ONLY_ELEMENTS) &&
- (map == GetHeap()->fixed_array_map() ||
- map == GetHeap()->fixed_cow_array_map())) ||
+ ASSERT((kind == FAST_ELEMENTS &&
+ (elements()->map() == GetHeap()->fixed_array_map() ||
+ elements()->map() == GetHeap()->fixed_cow_array_map())) ||
(kind == FAST_DOUBLE_ELEMENTS &&
- fixed_array->IsFixedDoubleArray()) ||
+ elements()->IsFixedDoubleArray()) ||
(kind == DICTIONARY_ELEMENTS &&
- fixed_array->IsFixedArray() &&
- fixed_array->IsDictionary()) ||
+ elements()->IsFixedArray() &&
+ elements()->IsDictionary()) ||
(kind > DICTIONARY_ELEMENTS));
-#endif
return kind;
}
@@ -4079,18 +4126,6 @@ bool JSObject::HasFastElements() {
}
-bool JSObject::HasFastSmiOnlyElements() {
- return GetElementsKind() == FAST_SMI_ONLY_ELEMENTS;
-}
-
-
-bool JSObject::HasFastTypeElements() {
- ElementsKind elements_kind = GetElementsKind();
- return elements_kind == FAST_SMI_ONLY_ELEMENTS ||
- elements_kind == FAST_ELEMENTS;
-}
-
-
bool JSObject::HasFastDoubleElements() {
return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
}
@@ -4101,11 +4136,6 @@ bool JSObject::HasDictionaryElements() {
}
-bool JSObject::HasNonStrictArgumentsElements() {
- return GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS;
-}
-
-
bool JSObject::HasExternalArrayElements() {
HeapObject* array = elements();
ASSERT(array != NULL);
@@ -4157,7 +4187,7 @@ bool JSObject::AllowsSetElementsLength() {
MaybeObject* JSObject::EnsureWritableFastElements() {
- ASSERT(HasFastTypeElements());
+ ASSERT(HasFastElements());
FixedArray* elems = FixedArray::cast(elements());
Isolate* isolate = GetIsolate();
if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@@ -4333,18 +4363,44 @@ Object* JSObject::BypassGlobalProxy() {
}
-MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) {
- return IsJSProxy()
- ? JSProxy::cast(this)->GetIdentityHash(flag)
- : JSObject::cast(this)->GetIdentityHash(flag);
+bool JSObject::HasHiddenPropertiesObject() {
+ ASSERT(!IsJSGlobalProxy());
+ return GetPropertyAttributePostInterceptor(this,
+ GetHeap()->hidden_symbol(),
+ false) != ABSENT;
}
-bool JSReceiver::HasElement(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasElementWithHandler(index);
- }
- return JSObject::cast(this)->HasElementWithReceiver(this, index);
+Object* JSObject::GetHiddenPropertiesObject() {
+ ASSERT(!IsJSGlobalProxy());
+ PropertyAttributes attributes;
+ // You can't install a getter on a property indexed by the hidden symbol,
+ // so we can be sure that GetLocalPropertyPostInterceptor returns a real
+ // object.
+ Object* result =
+ GetLocalPropertyPostInterceptor(this,
+ GetHeap()->hidden_symbol(),
+ &attributes)->ToObjectUnchecked();
+ return result;
+}
+
+
+MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
+ ASSERT(!IsJSGlobalProxy());
+ return SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+ hidden_obj,
+ DONT_ENUM,
+ kNonStrictMode);
+}
+
+
+bool JSObject::HasHiddenProperties() {
+ return !GetHiddenProperties(OMIT_CREATION)->ToObjectChecked()->IsUndefined();
+}
+
+
+bool JSObject::HasElement(uint32_t index) {
+ return HasElementWithReceiver(this, index);
}
@@ -4456,27 +4512,27 @@ MaybeObject* StringDictionaryShape::AsObject(String* key) {
}
-bool ObjectHashTableShape::IsMatch(JSReceiver* key, Object* other) {
- return key == JSReceiver::cast(other);
+bool ObjectHashTableShape::IsMatch(JSObject* key, Object* other) {
+ return key == JSObject::cast(other);
}
-uint32_t ObjectHashTableShape::Hash(JSReceiver* key) {
- MaybeObject* maybe_hash = key->GetIdentityHash(OMIT_CREATION);
+uint32_t ObjectHashTableShape::Hash(JSObject* key) {
+ MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
ASSERT(!maybe_hash->IsFailure());
return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
}
-uint32_t ObjectHashTableShape::HashForObject(JSReceiver* key, Object* other) {
- MaybeObject* maybe_hash =
- JSReceiver::cast(other)->GetIdentityHash(OMIT_CREATION);
+uint32_t ObjectHashTableShape::HashForObject(JSObject* key, Object* other) {
+ MaybeObject* maybe_hash = JSObject::cast(other)->GetIdentityHash(
+ JSObject::OMIT_CREATION);
ASSERT(!maybe_hash->IsFailure());
return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
}
-MaybeObject* ObjectHashTableShape::AsObject(JSReceiver* key) {
+MaybeObject* ObjectHashTableShape::AsObject(JSObject* key) {
return key;
}
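The ObjectHashTableShape hunk just above reverts the key type from JSReceiver to JSObject, but the scheme is unchanged: entries are hashed by an identity hash stored on the key object and only materialized on demand (OMIT_CREATION when merely probing). The following is a rough stand-alone sketch of that idea, not V8 code; Obj, IdentityHashTable and allow_creation are invented names.

#include <cstdint>
#include <unordered_map>

// Illustrative sketch only -- not the V8 ObjectHashTable.
struct Obj {
  uint32_t identity_hash = 0;  // 0 means "not assigned yet".
};

class IdentityHashTable {
 public:
  // Returns the existing hash, or assigns one if creation is allowed.
  uint32_t GetIdentityHash(Obj* obj, bool allow_creation) {
    if (obj->identity_hash == 0 && allow_creation) {
      obj->identity_hash = next_hash_++;
    }
    return obj->identity_hash;
  }

  void Put(Obj* key, int value) {
    table_[GetIdentityHash(key, /*allow_creation=*/true)] = value;
  }

  bool Get(Obj* key, int* value_out) {
    uint32_t hash = GetIdentityHash(key, /*allow_creation=*/false);
    if (hash == 0) return false;  // Never hashed, so it cannot be present.
    auto it = table_.find(hash);
    if (it == table_.end()) return false;
    *value_out = it->second;
    return true;
  }

 private:
  uint32_t next_hash_ = 1;
  std::unordered_map<uint32_t, int> table_;
};

Probing with allow_creation=false mirrors OMIT_CREATION: a key that never received a hash cannot already be in the table, so no hash has to be allocated just to answer a negative lookup.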
@@ -4496,7 +4552,7 @@ void Map::ClearCodeCache(Heap* heap) {
void JSArray::EnsureSize(int required_size) {
- ASSERT(HasFastTypeElements());
+ ASSERT(HasFastElements());
FixedArray* elts = FixedArray::cast(elements());
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
@@ -4514,17 +4570,13 @@ void JSArray::EnsureSize(int required_size) {
void JSArray::set_length(Smi* length) {
- // Don't need a write barrier for a Smi.
set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
}
-MaybeObject* JSArray::SetContent(FixedArray* storage) {
- MaybeObject* maybe_object = EnsureCanContainElements(storage);
- if (maybe_object->IsFailure()) return maybe_object;
+void JSArray::SetContent(FixedArray* storage) {
set_length(Smi::FromInt(storage->length()));
set_elements(storage);
- return this;
}
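JSArray::EnsureSize above grows the backing FixedArray with some slack (note kArraySizeThatFitsComfortablyInNewSpace), which is what keeps repeated appends amortized. The growth formula itself falls outside this hunk, so the snippet below is only a generic sketch of the technique under that assumption, not the policy used in the patch; GrowableStore and its constants are invented.

#include <cstddef>
#include <vector>

// Illustrative sketch only -- not the V8 growth policy.
class GrowableStore {
 public:
  void EnsureSize(std::size_t required_size) {
    if (elements_.size() >= required_size) return;
    // Grow with slack so that a sequence of EnsureSize(n), EnsureSize(n+1),
    // ... reallocates O(log n) times instead of on every call.
    std::size_t new_size = elements_.size() + (elements_.size() >> 1) + 16;
    if (new_size < required_size) new_size = required_size;
    elements_.resize(new_size);
  }

  std::size_t capacity() const { return elements_.size(); }

 private:
  std::vector<int> elements_;
};

Growing by roughly 1.5x plus a small constant bounds the number of reallocations at O(log n) for n appended elements, which is the point of over-allocating in EnsureSize.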
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index fc7573241a..0398572f90 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -82,18 +82,12 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberPrint(out);
break;
- case FIXED_DOUBLE_ARRAY_TYPE:
- FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
- break;
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayPrint(out);
break;
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayPrint(out);
break;
- case FREE_SPACE_TYPE:
- FreeSpace::cast(this)->FreeSpacePrint(out);
- break;
case EXTERNAL_PIXEL_ARRAY_TYPE:
ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out);
break;
@@ -195,11 +189,6 @@ void ByteArray::ByteArrayPrint(FILE* out) {
}
-void FreeSpace::FreeSpacePrint(FILE* out) {
- PrintF(out, "free space, size %d", Size());
-}
-
-
void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
PrintF(out, "external pixel array");
}
@@ -245,54 +234,6 @@ void ExternalDoubleArray::ExternalDoubleArrayPrint(FILE* out) {
}
-static void PrintElementsKind(FILE* out, ElementsKind kind) {
- switch (kind) {
- case FAST_SMI_ONLY_ELEMENTS:
- PrintF(out, "FAST_SMI_ONLY_ELEMENTS");
- break;
- case FAST_ELEMENTS:
- PrintF(out, "FAST_ELEMENTS");
- break;
- case FAST_DOUBLE_ELEMENTS:
- PrintF(out, "FAST_DOUBLE_ELEMENTS");
- break;
- case DICTIONARY_ELEMENTS:
- PrintF(out, "DICTIONARY_ELEMENTS");
- break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- PrintF(out, "NON_STRICT_ARGUMENTS_ELEMENTS");
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- PrintF(out, "EXTERNAL_BYTE_ELEMENTS");
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- PrintF(out, "EXTERNAL_UNSIGNED_BYTE_ELEMENTS");
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- PrintF(out, "EXTERNAL_SHORT_ELEMENTS");
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- PrintF(out, "EXTERNAL_UNSIGNED_SHORT_ELEMENTS");
- break;
- case EXTERNAL_INT_ELEMENTS:
- PrintF(out, "EXTERNAL_INT_ELEMENTS");
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- PrintF(out, "EXTERNAL_UNSIGNED_INT_ELEMENTS");
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- PrintF(out, "EXTERNAL_FLOAT_ELEMENTS");
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- PrintF(out, "EXTERNAL_DOUBLE_ELEMENTS");
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- PrintF(out, "EXTERNAL_DOUBLE_ELEMENTS");
- break;
- }
-}
-
-
void JSObject::PrintProperties(FILE* out) {
if (HasFastProperties()) {
DescriptorArray* descs = map()->instance_descriptors();
@@ -315,33 +256,14 @@ void JSObject::PrintProperties(FILE* out) {
descs->GetCallbacksObject(i)->ShortPrint(out);
PrintF(out, " (callback)\n");
break;
- case ELEMENTS_TRANSITION: {
- PrintF(out, "(elements transition to ");
- Object* descriptor_contents = descs->GetValue(i);
- if (descriptor_contents->IsMap()) {
- Map* map = Map::cast(descriptor_contents);
- PrintElementsKind(out, map->elements_kind());
- } else {
- FixedArray* map_array = FixedArray::cast(descriptor_contents);
- for (int i = 0; i < map_array->length(); ++i) {
- Map* map = Map::cast(map_array->get(i));
- if (i != 0) {
- PrintF(out, ", ");
- }
- PrintElementsKind(out, map->elements_kind());
- }
- }
- PrintF(out, ")\n");
- break;
- }
case MAP_TRANSITION:
- PrintF(out, "(map transition)\n");
+ PrintF(out, " (map transition)\n");
break;
case CONSTANT_TRANSITION:
- PrintF(out, "(constant transition)\n");
+ PrintF(out, " (constant transition)\n");
break;
case NULL_DESCRIPTOR:
- PrintF(out, "(null descriptor)\n");
+ PrintF(out, " (null descriptor)\n");
break;
default:
UNREACHABLE();
@@ -355,10 +277,7 @@ void JSObject::PrintProperties(FILE* out) {
void JSObject::PrintElements(FILE* out) {
- // Don't call GetElementsKind, its validation code can cause the printer to
- // fail when debugging.
- switch (map()->elements_kind()) {
- case FAST_SMI_ONLY_ELEMENTS:
+ switch (GetElementsKind()) {
case FAST_ELEMENTS: {
// Print in array notation for non-sparse arrays.
FixedArray* p = FixedArray::cast(elements());
@@ -466,13 +385,8 @@ void JSObject::PrintElements(FILE* out) {
void JSObject::JSObjectPrint(FILE* out) {
PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
- PrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
- // Don't call GetElementsKind, its validation code can cause the printer to
- // fail when debugging.
- PrintElementsKind(out, this->map()->elements_kind());
- PrintF(out,
- "]\n - prototype = %p\n",
- reinterpret_cast<void*>(GetPrototype()));
+ PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
PrintF(out, " {\n");
PrintProperties(out);
PrintElements(out);
@@ -501,7 +415,6 @@ static const char* TypeToString(InstanceType type) {
case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
- case FREE_SPACE_TYPE: return "FREE_SPACE";
case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -545,9 +458,7 @@ void Map::MapPrint(FILE* out) {
PrintF(out, " - type: %s\n", TypeToString(instance_type()));
PrintF(out, " - instance size: %d\n", instance_size());
PrintF(out, " - inobject properties: %d\n", inobject_properties());
- PrintF(out, " - elements kind: ");
- PrintElementsKind(out, elements_kind());
- PrintF(out, "\n - pre-allocated property fields: %d\n",
+ PrintF(out, " - pre-allocated property fields: %d\n",
pre_allocated_property_fields());
PrintF(out, " - unused property fields: %d\n", unused_property_fields());
if (is_hidden_prototype()) {
@@ -605,16 +516,6 @@ void FixedArray::FixedArrayPrint(FILE* out) {
}
-void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
- HeapObject::PrintHeader(out, "FixedDoubleArray");
- PrintF(out, " - length: %d", length());
- for (int i = 0; i < length(); i++) {
- PrintF(out, "\n [%d]: %g", i, get_scalar(i));
- }
- PrintF(out, "\n");
-}
-
-
void JSValue::JSValuePrint(FILE* out) {
HeapObject::PrintHeader(out, "ValueObject");
value()->Print(out);
@@ -686,8 +587,6 @@ void JSProxy::JSProxyPrint(FILE* out) {
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - handler = ");
handler()->Print(out);
- PrintF(out, " - hash = ");
- hash()->Print(out);
PrintF(out, "\n");
}
@@ -708,6 +607,7 @@ void JSFunctionProxy::JSFunctionProxyPrint(FILE* out) {
void JSWeakMap::JSWeakMapPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSWeakMap");
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - number of elements = %d\n", table()->NumberOfElements());
PrintF(out, " - table = ");
table()->ShortPrint(out);
PrintF(out, "\n");
@@ -902,15 +802,10 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(FILE* out) {
void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "ObjectTemplateInfo");
- PrintF(out, " - tag: ");
- tag()->ShortPrint(out);
- PrintF(out, "\n - property_list: ");
- property_list()->ShortPrint(out);
PrintF(out, "\n - constructor: ");
constructor()->ShortPrint(out);
PrintF(out, "\n - internal_field_count: ");
internal_field_count()->ShortPrint(out);
- PrintF(out, "\n");
}
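The printer changes above are mechanical consequences of the downgrade: types and annotations that 3.6.4 does not have (FreeSpace, FixedDoubleArray printing, the elements-kind output) lose their arm in the instance-type switch. The pattern itself is a plain tag-dispatched debug printer; a minimal sketch with an invented Tag enum and Node struct follows, purely for illustration.

#include <cstdio>

// Illustrative sketch only -- a tag-dispatched printer in the style of
// HeapObjectPrint above. Tag and Node are invented for the example.
enum class Tag { kNumber, kString, kArray };

struct Node {
  Tag tag;
  double number;
  const char* string;
  int array_length;
};

static void NodePrint(const Node& node, FILE* out) {
  switch (node.tag) {
    case Tag::kNumber:
      std::fprintf(out, "<Number %g>\n", node.number);
      break;
    case Tag::kString:
      std::fprintf(out, "<String \"%s\">\n", node.string);
      break;
    case Tag::kArray:
      std::fprintf(out, "<Array[%d]>\n", node.array_length);
      break;
  }
}

Adding or removing a heap type in such a scheme always touches every switch of this kind, which is exactly what the paired edits to HeapObjectPrint, TypeToString and HeapObjectShortPrint in this commit show.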
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
deleted file mode 100644
index 6f0f61d351..0000000000
--- a/deps/v8/src/objects-visiting-inl.h
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_OBJECTS_VISITING_INL_H_
-#define V8_OBJECTS_VISITING_INL_H_
-
-
-namespace v8 {
-namespace internal {
-
-template<typename StaticVisitor>
-void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
- table_.Register(kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticVisitor,
- ConsString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitConsString,
- &FixedBodyVisitor<StaticVisitor,
- ConsString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitSlicedString,
- &FixedBodyVisitor<StaticVisitor,
- SlicedString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticVisitor,
- FixedArray::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
-
- table_.Register(kVisitGlobalContext,
- &FixedBodyVisitor<StaticVisitor,
- Context::ScavengeBodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitByteArray, &VisitByteArray);
-
- table_.Register(kVisitSharedFunctionInfo,
- &FixedBodyVisitor<StaticVisitor,
- SharedFunctionInfo::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
-
- table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
-
- table_.Register(kVisitJSFunction,
- &JSObjectVisitor::
- template VisitSpecialized<JSFunction::kSize>);
-
- table_.Register(kVisitFreeSpace, &VisitFreeSpace);
-
- table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
-
- table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
-
- table_.template RegisterSpecializations<DataObjectVisitor,
- kVisitDataObject,
- kVisitDataObjectGeneric>();
-
- table_.template RegisterSpecializations<JSObjectVisitor,
- kVisitJSObject,
- kVisitJSObjectGeneric>();
- table_.template RegisterSpecializations<StructVisitor,
- kVisitStruct,
- kVisitStructGeneric>();
-}
-
-
-void Code::CodeIterateBody(ObjectVisitor* v) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
- RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
- IteratePointer(v, kRelocationInfoOffset);
- IteratePointer(v, kDeoptimizationDataOffset);
-
- RelocIterator it(this, mode_mask);
- for (; !it.done(); it.next()) {
- it.rinfo()->Visit(v);
- }
-}
-
-
-template<typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
- RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
- StaticVisitor::VisitPointer(
- heap,
- reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
- StaticVisitor::VisitPointer(
- heap,
- reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
-
- RelocIterator it(this, mode_mask);
- for (; !it.done(); it.next()) {
- it.rinfo()->template Visit<StaticVisitor>(heap);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_OBJECTS_VISITING_INL_H_
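The deleted objects-visiting-inl.h above (its Initialize() body reappears inline in objects-visiting.h further down) is built around one idea: GC visitation is dispatched through a flat table of callbacks indexed by a per-map visitor id, so the hot path is a single indexed load plus an indirect call rather than virtual dispatch. Below is a stand-alone sketch of such a table; the ids, Register and Dispatch names are invented and this is not the V8 VisitorDispatchTable.

// Illustrative sketch only.
enum VisitorId { kVisitA, kVisitB, kVisitC, kVisitorIdCount };

struct Object {
  VisitorId visitor_id;  // In V8 this lives on the object's Map.
  int payload;
};

class DispatchTable {
 public:
  using Callback = int (*)(Object*);

  void Register(VisitorId id, Callback callback) { callbacks_[id] = callback; }

  // One indexed load + indirect call, no virtual dispatch.
  int Dispatch(Object* object) { return callbacks_[object->visitor_id](object); }

 private:
  Callback callbacks_[kVisitorIdCount] = {};
};

// Example visitors registered at start-up, as Initialize() does above.
static int VisitA(Object* o) { return o->payload; }
static int VisitB(Object* o) { return 2 * o->payload; }

static DispatchTable MakeTable() {
  DispatchTable table;
  table.Register(kVisitA, &VisitA);
  table.Register(kVisitB, &VisitB);
  table.Register(kVisitC, &VisitB);  // Several ids may share one visitor.
  return table;
}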
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index 20a7b31701..0aa21dd6ed 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -73,9 +73,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case BYTE_ARRAY_TYPE:
return kVisitByteArray;
- case FREE_SPACE_TYPE:
- return kVisitFreeSpace;
-
case FIXED_ARRAY_TYPE:
return kVisitFixedArray;
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index e6ddfed4a7..4ce1bd077b 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -30,6 +30,22 @@
#include "allocation.h"
+#if V8_TARGET_ARCH_IA32
+#include "ia32/assembler-ia32.h"
+#include "ia32/assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/assembler-x64.h"
+#include "x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/assembler-arm.h"
+#include "arm/assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
// This file provides base classes and auxiliary methods for defining
// static object visitors used during GC.
// Visiting HeapObject body with a normal ObjectVisitor requires performing
@@ -51,7 +67,6 @@ class StaticVisitorBase : public AllStatic {
kVisitSeqTwoByteString,
kVisitShortcutCandidate,
kVisitByteArray,
- kVisitFreeSpace,
kVisitFixedArray,
kVisitFixedDoubleArray,
kVisitGlobalContext,
@@ -157,10 +172,6 @@ class VisitorDispatchTable {
}
}
- inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
- return reinterpret_cast<Callback>(callbacks_[id]);
- }
-
inline Callback GetVisitor(Map* map) {
return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
}
@@ -225,7 +236,7 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
static inline ReturnType Visit(Map* map, HeapObject* object) {
int object_size = BodyDescriptor::SizeOf(map, object);
BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->GetHeap(),
+ map->heap(),
object,
BodyDescriptor::kStartOffset,
object_size);
@@ -236,7 +247,7 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->GetHeap(),
+ map->heap(),
object,
BodyDescriptor::kStartOffset,
object_size);
@@ -250,7 +261,7 @@ class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
static inline ReturnType Visit(Map* map, HeapObject* object) {
BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->GetHeap(),
+ map->heap(),
object,
BodyDescriptor::kStartOffset,
BodyDescriptor::kEndOffset);
@@ -278,7 +289,63 @@ class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
template<typename StaticVisitor>
class StaticNewSpaceVisitor : public StaticVisitorBase {
public:
- static void Initialize();
+ static void Initialize() {
+ table_.Register(kVisitShortcutCandidate,
+ &FixedBodyVisitor<StaticVisitor,
+ ConsString::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitConsString,
+ &FixedBodyVisitor<StaticVisitor,
+ ConsString::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitSlicedString,
+ &FixedBodyVisitor<StaticVisitor,
+ SlicedString::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitFixedArray,
+ &FlexibleBodyVisitor<StaticVisitor,
+ FixedArray::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+
+ table_.Register(kVisitGlobalContext,
+ &FixedBodyVisitor<StaticVisitor,
+ Context::ScavengeBodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitByteArray, &VisitByteArray);
+
+ table_.Register(kVisitSharedFunctionInfo,
+ &FixedBodyVisitor<StaticVisitor,
+ SharedFunctionInfo::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(kVisitJSWeakMap, &VisitJSObject);
+
+ table_.Register(kVisitJSRegExp, &VisitJSObject);
+
+ table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
+
+ table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
+
+ table_.Register(kVisitJSFunction,
+ &JSObjectVisitor::
+ template VisitSpecialized<JSFunction::kSize>);
+
+ table_.RegisterSpecializations<DataObjectVisitor,
+ kVisitDataObject,
+ kVisitDataObjectGeneric>();
+ table_.RegisterSpecializations<JSObjectVisitor,
+ kVisitJSObject,
+ kVisitJSObjectGeneric>();
+ table_.RegisterSpecializations<StructVisitor,
+ kVisitStruct,
+ kVisitStructGeneric>();
+ }
static inline int IterateBody(Map* map, HeapObject* obj) {
return table_.GetVisitor(map)(map, obj);
@@ -312,10 +379,6 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
SeqTwoByteStringSize(map->instance_type());
}
- static inline int VisitFreeSpace(Map* map, HeapObject* object) {
- return FreeSpace::cast(object)->Size();
- }
-
class DataObjectVisitor {
public:
template<int object_size>
@@ -347,6 +410,55 @@ VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
StaticNewSpaceVisitor<StaticVisitor>::table_;
+void Code::CodeIterateBody(ObjectVisitor* v) {
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+ RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+ // Use the relocation info pointer before it is visited by
+ // the heap compaction in the next statement.
+ RelocIterator it(this, mode_mask);
+
+ IteratePointer(v, kRelocationInfoOffset);
+ IteratePointer(v, kDeoptimizationDataOffset);
+
+ for (; !it.done(); it.next()) {
+ it.rinfo()->Visit(v);
+ }
+}
+
+
+template<typename StaticVisitor>
+void Code::CodeIterateBody(Heap* heap) {
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+ RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+ // Use the relocation info pointer before it is visited by
+ // the heap compaction in the next statement.
+ RelocIterator it(this, mode_mask);
+
+ StaticVisitor::VisitPointer(
+ heap,
+ reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
+ StaticVisitor::VisitPointer(
+ heap,
+ reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
+
+ for (; !it.done(); it.next()) {
+ it.rinfo()->template Visit<StaticVisitor>(heap);
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_OBJECTS_VISITING_H_
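Both FlexibleBodyVisitor and FixedBodyVisitor above reduce to the same operation: walk the pointer-sized slots between a start offset and an end offset of the object body and hand each slot to the visitor. A hedged stand-alone sketch of that slot walk, using raw byte arithmetic and invented names, and assuming the whole body consists of pointer slots:

#include <cstddef>
#include <cstdint>

// Illustrative sketch only -- not the V8 BodyDescriptor machinery.
struct Obj;

// Visit every pointer-sized slot in [start_offset, end_offset) of the body.
template <typename Visitor>
void IteratePointers(Obj* object, std::size_t start_offset,
                     std::size_t end_offset, Visitor&& visit) {
  std::uint8_t* base = reinterpret_cast<std::uint8_t*>(object);
  for (std::size_t offset = start_offset; offset < end_offset;
       offset += sizeof(Obj*)) {
    // Each slot holds a pointer to another heap object.
    Obj** slot = reinterpret_cast<Obj**>(base + offset);
    visit(slot);
  }
}

A fixed-layout type would pass compile-time constants for both offsets, as FixedBodyVisitor does; a variable-sized one computes the end offset from the instance size first, which is what FlexibleBodyVisitor's SizeOf call is for.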
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 5612732303..6085b4ef25 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -39,9 +39,7 @@
#include "hydrogen.h"
#include "objects-inl.h"
#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
#include "macro-assembler.h"
-#include "mark-compact.h"
#include "safepoint-table.h"
#include "string-stream.h"
#include "utils.h"
@@ -134,20 +132,27 @@ Object* Object::ToBoolean() {
void Object::Lookup(String* name, LookupResult* result) {
Object* holder = NULL;
- if (IsJSReceiver()) {
- holder = this;
+ if (IsSmi()) {
+ Context* global_context = Isolate::Current()->context()->global_context();
+ holder = global_context->number_function()->instance_prototype();
} else {
+ HeapObject* heap_object = HeapObject::cast(this);
+ if (heap_object->IsJSObject()) {
+ return JSObject::cast(this)->Lookup(name, result);
+ } else if (heap_object->IsJSProxy()) {
+ return result->HandlerResult();
+ }
Context* global_context = Isolate::Current()->context()->global_context();
- if (IsNumber()) {
- holder = global_context->number_function()->instance_prototype();
- } else if (IsString()) {
+ if (heap_object->IsString()) {
holder = global_context->string_function()->instance_prototype();
- } else if (IsBoolean()) {
+ } else if (heap_object->IsHeapNumber()) {
+ holder = global_context->number_function()->instance_prototype();
+ } else if (heap_object->IsBoolean()) {
holder = global_context->boolean_function()->instance_prototype();
}
}
ASSERT(holder != NULL); // Cannot handle null or undefined.
- JSReceiver::cast(holder)->Lookup(name, result);
+ JSObject::cast(holder)->Lookup(name, result);
}
@@ -162,9 +167,10 @@ MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
}
-MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
- Object* structure,
- String* name) {
+MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
+ Object* structure,
+ String* name,
+ Object* holder) {
Isolate* isolate = name->GetIsolate();
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -185,9 +191,10 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
HandleScope scope(isolate);
JSObject* self = JSObject::cast(receiver);
+ JSObject* holder_handle = JSObject::cast(holder);
Handle<String> key(name);
LOG(isolate, ApiNamedPropertyAccess("load", self, name));
- CustomArguments args(isolate, data->data(), self, this);
+ CustomArguments args(isolate, data->data(), self, holder_handle);
v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
@@ -205,9 +212,9 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
// __defineGetter__ callback
if (structure->IsFixedArray()) {
Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
- if (getter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
+ if (getter->IsJSFunction()) {
+ return Object::GetPropertyWithDefinedGetter(receiver,
+ JSFunction::cast(getter));
}
// Getter is not a function.
return isolate->heap()->undefined_value();
@@ -218,64 +225,47 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
}
-MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
- String* name_raw) {
- Isolate* isolate = GetIsolate();
+MaybeObject* Object::GetPropertyWithHandler(Object* receiver_raw,
+ String* name_raw,
+ Object* handler_raw) {
+ Isolate* isolate = name_raw->GetIsolate();
HandleScope scope(isolate);
Handle<Object> receiver(receiver_raw);
Handle<Object> name(name_raw);
+ Handle<Object> handler(handler_raw);
- Handle<Object> args[] = { receiver, name };
- Handle<Object> result = CallTrap(
- "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args);
+ // Extract trap function.
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("get");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
if (isolate->has_pending_exception()) return Failure::Exception();
+ if (trap->IsUndefined()) {
+ // Get the derived `get' property.
+ trap = isolate->derived_get_trap();
+ }
- return *result;
-}
-
-
-MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
- uint32_t index) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return GetPropertyWithHandler(receiver, name);
-}
-
-
-MaybeObject* JSProxy::SetElementWithHandler(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return SetPropertyWithHandler(name, value, NONE, strict_mode);
-}
-
+ // Call trap function.
+ Object** args[] = { receiver.location(), name.location() };
+ bool has_exception;
+ Handle<Object> result =
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+ if (has_exception) return Failure::Exception();
-bool JSProxy::HasElementWithHandler(uint32_t index) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return HasPropertyWithHandler(name);
+ return *result;
}
MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
- JSReceiver* getter) {
+ JSFunction* getter) {
HandleScope scope;
- Handle<JSReceiver> fun(getter);
+ Handle<JSFunction> fun(JSFunction::cast(getter));
Handle<Object> self(receiver);
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = fun->GetHeap()->isolate()->debug();
// Handle stepping into a getter if step into is active.
- // TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->StepInActive() && fun->IsJSFunction()) {
- debug->HandleStepIn(
- Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
+ if (debug->StepInActive()) {
+ debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
}
#endif
-
bool has_pending_exception;
Handle<Object> result =
Execution::Call(fun, self, 0, NULL, &has_pending_exception);
@@ -300,8 +290,10 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
AccessorInfo* info = AccessorInfo::cast(obj);
if (info->all_can_read()) {
*attributes = result->GetAttributes();
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
+ return GetPropertyWithCallback(receiver,
+ result->GetCallbackObject(),
+ name,
+ result->holder());
}
}
break;
@@ -494,7 +486,7 @@ MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
}
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
- cell->set_value(cell->GetHeap()->the_hole_value());
+ cell->set_value(cell->heap()->the_hole_value());
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
Object* deleted = dictionary->DeleteProperty(entry, mode);
@@ -545,9 +537,7 @@ MaybeObject* Object::GetProperty(Object* receiver,
// holder in the prototype chain.
// Proxy handlers do not use the proxy's prototype, so we can skip this.
if (!result->IsHandler()) {
- Object* last = result->IsProperty()
- ? result->holder()
- : Object::cast(heap->null_value());
+ Object* last = result->IsProperty() ? result->holder() : heap->null_value();
ASSERT(this != this->GetPrototype());
for (Object* current = this; true; current = current->GetPrototype()) {
if (current->IsAccessCheckNeeded()) {
@@ -576,26 +566,30 @@ MaybeObject* Object::GetProperty(Object* receiver,
}
*attributes = result->GetAttributes();
Object* value;
+ JSObject* holder = result->holder();
switch (result->type()) {
case NORMAL:
- value = result->holder()->GetNormalizedProperty(result);
+ value = holder->GetNormalizedProperty(result);
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
case FIELD:
- value = result->holder()->FastPropertyAt(result->GetFieldIndex());
+ value = holder->FastPropertyAt(result->GetFieldIndex());
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
case CONSTANT_FUNCTION:
return result->GetConstantFunction();
case CALLBACKS:
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
- case HANDLER:
- return result->proxy()->GetPropertyWithHandler(receiver, name);
+ return GetPropertyWithCallback(receiver,
+ result->GetCallbackObject(),
+ name,
+ holder);
+ case HANDLER: {
+ JSProxy* proxy = JSProxy::cast(this);
+ return GetPropertyWithHandler(receiver, name, proxy->handler());
+ }
case INTERCEPTOR: {
JSObject* recvr = JSObject::cast(receiver);
- return result->holder()->GetPropertyWithInterceptor(
- recvr, name, attributes);
+ return holder->GetPropertyWithInterceptor(recvr, name, attributes);
}
case MAP_TRANSITION:
case ELEMENTS_TRANSITION:
@@ -619,21 +613,28 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
for (holder = this;
holder != heap->null_value();
holder = holder->GetPrototype()) {
- if (!holder->IsJSObject()) {
- Isolate* isolate = heap->isolate();
- Context* global_context = isolate->context()->global_context();
- if (holder->IsNumber()) {
- holder = global_context->number_function()->instance_prototype();
- } else if (holder->IsString()) {
- holder = global_context->string_function()->instance_prototype();
- } else if (holder->IsBoolean()) {
- holder = global_context->boolean_function()->instance_prototype();
- } else if (holder->IsJSProxy()) {
- return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
- } else {
- // Undefined and null have no indexed properties.
- ASSERT(holder->IsUndefined() || holder->IsNull());
- return heap->undefined_value();
+ if (holder->IsSmi()) {
+ Context* global_context = Isolate::Current()->context()->global_context();
+ holder = global_context->number_function()->instance_prototype();
+ } else {
+ HeapObject* heap_object = HeapObject::cast(holder);
+ if (!heap_object->IsJSObject()) {
+ Isolate* isolate = heap->isolate();
+ Context* global_context = isolate->context()->global_context();
+ if (heap_object->IsString()) {
+ holder = global_context->string_function()->instance_prototype();
+ } else if (heap_object->IsHeapNumber()) {
+ holder = global_context->number_function()->instance_prototype();
+ } else if (heap_object->IsBoolean()) {
+ holder = global_context->boolean_function()->instance_prototype();
+ } else if (heap_object->IsJSProxy()) {
+ // TODO(rossberg): do something
+ return heap->undefined_value(); // For now...
+ } else {
+ // Undefined and null have no indexed properties.
+ ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
+ return heap->undefined_value();
+ }
}
}
@@ -876,9 +877,6 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
- }
return true;
}
@@ -925,10 +923,6 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
- }
-
return true;
}
@@ -1004,7 +998,8 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
break;
}
case JS_WEAK_MAP_TYPE: {
- accumulator->Add("<JS WeakMap>");
+ int elements = JSWeakMap::cast(this)->table()->NumberOfElements();
+ accumulator->Add("<JS WeakMap[%d]>", elements);
break;
}
case JS_REGEXP_TYPE: {
@@ -1032,7 +1027,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
// JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
default: {
Map* map_of_this = map();
- Heap* heap = GetHeap();
+ Heap* heap = map_of_this->heap();
Object* constructor = map_of_this->constructor();
bool printed = false;
if (constructor->IsHeapObject() &&
@@ -1054,6 +1049,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
global_object ? "Global Object: " : "",
vowel ? "n" : "");
accumulator->Put(str);
+ accumulator->Put('>');
printed = true;
}
}
@@ -1075,6 +1071,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
+ // if (!HEAP->InNewSpace(this)) PrintF("*", this);
Heap* heap = GetHeap();
if (!heap->Contains(this)) {
accumulator->Add("!!!INVALID POINTER!!!");
@@ -1097,7 +1094,7 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
}
switch (map()->instance_type()) {
case MAP_TYPE:
- accumulator->Add("<Map(elements=%u)>", Map::cast(this)->elements_kind());
+ accumulator->Add("<Map>");
break;
case FIXED_ARRAY_TYPE:
accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length());
@@ -1105,9 +1102,6 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
case BYTE_ARRAY_TYPE:
accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
break;
- case FREE_SPACE_TYPE:
- accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size());
- break;
case EXTERNAL_PIXEL_ARRAY_TYPE:
accumulator->Add("<ExternalPixelArray[%u]>",
ExternalPixelArray::cast(this)->length());
@@ -1283,7 +1277,6 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case HEAP_NUMBER_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
- case FREE_SPACE_TYPE:
case EXTERNAL_PIXEL_ARRAY_TYPE:
case EXTERNAL_BYTE_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -1540,7 +1533,7 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
// If the old map is the global object map (from new Object()),
// then transitions are not added to it, so we are done.
- Heap* heap = GetHeap();
+ Heap* heap = old_map->heap();
if (old_map == heap->isolate()->context()->global_context()->
object_function()->map()) {
return function;
@@ -1616,7 +1609,7 @@ MaybeObject* JSObject::AddProperty(String* name,
StrictModeFlag strict_mode) {
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
- Heap* heap = GetHeap();
+ Heap* heap = map_of_this->heap();
if (!map_of_this->is_extensible()) {
if (strict_mode == kNonStrictMode) {
return heap->undefined_value();
@@ -1665,14 +1658,6 @@ MaybeObject* JSObject::SetPropertyPostInterceptor(
// found. Use set property to handle all these cases.
return SetProperty(&result, name, value, attributes, strict_mode);
}
- bool found = false;
- MaybeObject* result_object;
- result_object = SetPropertyWithCallbackSetterInPrototypes(name,
- value,
- attributes,
- &found,
- strict_mode);
- if (found) return result_object;
// Add a new real property.
return AddProperty(name, value, attributes, strict_mode);
}
@@ -1711,7 +1696,7 @@ MaybeObject* JSObject::ConvertDescriptorToFieldAndMapTransition(
return result;
}
// Do not add transitions to the map of "new Object()".
- if (map() == GetIsolate()->context()->global_context()->
+ if (map() == old_map->heap()->isolate()->context()->global_context()->
object_function()->map()) {
return result;
}
@@ -1895,9 +1880,8 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
if (structure->IsFixedArray()) {
Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
- if (setter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
+ if (setter->IsJSFunction()) {
+ return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
@@ -1916,24 +1900,22 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
}
-MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value) {
+MaybeObject* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter,
+ Object* value) {
Isolate* isolate = GetIsolate();
Handle<Object> value_handle(value, isolate);
- Handle<JSReceiver> fun(setter, isolate);
- Handle<JSReceiver> self(this, isolate);
+ Handle<JSFunction> fun(JSFunction::cast(setter), isolate);
+ Handle<JSObject> self(this, isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
// Handle stepping into a setter if step into is active.
- // TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->StepInActive() && fun->IsJSFunction()) {
- debug->HandleStepIn(
- Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
+ if (debug->StepInActive()) {
+ debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
}
#endif
bool has_pending_exception;
- Handle<Object> argv[] = { value_handle };
- Execution::Call(fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
+ Object** argv[] = { value_handle.location() };
+ Execution::Call(fun, self, 1, argv, &has_pending_exception);
// Check for pending exception and return the result.
if (has_pending_exception) return Failure::Exception();
return *value_handle;
@@ -1946,9 +1928,6 @@ void JSObject::LookupCallbackSetterInPrototypes(String* name,
for (Object* pt = GetPrototype();
pt != heap->null_value();
pt = pt->GetPrototype()) {
- if (pt->IsJSProxy()) {
- return result->HandlerResult(JSProxy::cast(pt));
- }
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
if (result->IsProperty()) {
if (result->type() == CALLBACKS && !result->IsReadOnly()) return;
@@ -1969,16 +1948,6 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
for (Object* pt = GetPrototype();
pt != heap->null_value();
pt = pt->GetPrototype()) {
- if (pt->IsJSProxy()) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) {
- *found = true; // Force abort
- return maybe;
- }
- return JSProxy::cast(pt)->SetPropertyWithHandlerIfDefiningSetter(
- name, value, NONE, strict_mode, found);
- }
if (!JSObject::cast(pt)->HasDictionaryElements()) {
continue;
}
@@ -2000,60 +1969,6 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
return heap->the_hole_value();
}
-MaybeObject* JSObject::SetPropertyWithCallbackSetterInPrototypes(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- bool* found,
- StrictModeFlag strict_mode) {
- LookupResult result;
- LookupCallbackSetterInPrototypes(name, &result);
- Heap* heap = GetHeap();
- if (result.IsFound()) {
- *found = true;
- if (result.type() == CALLBACKS) {
- return SetPropertyWithCallback(result.GetCallbackObject(),
- name,
- value,
- result.holder(),
- strict_mode);
- } else if (result.type() == HANDLER) {
- // We could not find a local property so let's check whether there is an
- // accessor that wants to handle the property.
- LookupResult accessor_result;
- LookupCallbackSetterInPrototypes(name, &accessor_result);
- if (accessor_result.IsFound()) {
- if (accessor_result.type() == CALLBACKS) {
- return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
- name,
- value,
- accessor_result.holder(),
- strict_mode);
- } else if (accessor_result.type() == HANDLER) {
- // There is a proxy in the prototype chain. Invoke its
- // getPropertyDescriptor trap.
- bool found = false;
- // SetPropertyWithHandlerIfDefiningSetter can cause GC,
- // make sure to use the handlified references after calling
- // the function.
- Handle<JSObject> self(this);
- Handle<String> hname(name);
- Handle<Object> hvalue(value);
- MaybeObject* result =
- accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
- name, value, attributes, strict_mode, &found);
- if (found) return result;
- // The proxy does not define the property as an accessor.
- // Consequently, it has no effect on setting the receiver.
- return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
- }
- }
- }
- }
- *found = false;
- return heap->the_hole_value();
-}
-
void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
DescriptorArray* descriptors = map()->instance_descriptors();
@@ -2070,8 +1985,7 @@ void Map::LookupInDescriptors(JSObject* holder,
String* name,
LookupResult* result) {
DescriptorArray* descriptors = instance_descriptors();
- DescriptorLookupCache* cache =
- GetHeap()->isolate()->descriptor_lookup_cache();
+ DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache();
int number = cache->Lookup(descriptors, name);
if (number == DescriptorLookupCache::kAbsent) {
number = descriptors->Search(name);
@@ -2085,239 +1999,75 @@ void Map::LookupInDescriptors(JSObject* holder,
}
-static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents,
- ElementsKind elements_kind) {
- if (descriptor_contents->IsMap()) {
- Map* map = Map::cast(descriptor_contents);
- if (map->elements_kind() == elements_kind) {
- return map;
- }
- return NULL;
- }
-
- FixedArray* map_array = FixedArray::cast(descriptor_contents);
- for (int i = 0; i < map_array->length(); ++i) {
- Object* current = map_array->get(i);
- // Skip undefined slots, they are sentinels for reclaimed maps.
- if (!current->IsUndefined()) {
- Map* current_map = Map::cast(map_array->get(i));
- if (current_map->elements_kind() == elements_kind) {
- return current_map;
- }
- }
- }
-
- return NULL;
-}
-
-
-static MaybeObject* AddElementsTransitionMapToDescriptor(
- Object* descriptor_contents,
- Map* new_map) {
- // Nothing was in the descriptor for an ELEMENTS_TRANSITION,
- // simply add the map.
- if (descriptor_contents == NULL) {
- return new_map;
- }
-
- // There was already a map in the descriptor, create a 2-element FixedArray
- // to contain the existing map plus the new one.
- FixedArray* new_array;
- Heap* heap = new_map->GetHeap();
- if (descriptor_contents->IsMap()) {
- // Must tenure, DescriptorArray expects no new-space objects.
- MaybeObject* maybe_new_array = heap->AllocateFixedArray(2, TENURED);
- if (!maybe_new_array->To<FixedArray>(&new_array)) {
- return maybe_new_array;
- }
- new_array->set(0, descriptor_contents);
- new_array->set(1, new_map);
- return new_array;
- }
-
- // The descriptor already contained a list of maps for different ElementKinds
- // of ELEMENTS_TRANSITION, first check the existing array for an undefined
- // slot, and if that's not available, create a FixedArray to hold the existing
- // maps plus the new one and fill it in.
- FixedArray* array = FixedArray::cast(descriptor_contents);
- for (int i = 0; i < array->length(); ++i) {
- if (array->get(i)->IsUndefined()) {
- array->set(i, new_map);
- return array;
- }
- }
-
- // Must tenure, DescriptorArray expects no new-space objects.
- MaybeObject* maybe_new_array =
- heap->AllocateFixedArray(array->length() + 1, TENURED);
- if (!maybe_new_array->To<FixedArray>(&new_array)) {
- return maybe_new_array;
- }
- int i = 0;
- while (i < array->length()) {
- new_array->set(i, array->get(i));
- ++i;
- }
- new_array->set(i, new_map);
- return new_array;
-}
-
-
-String* Map::elements_transition_sentinel_name() {
- return GetHeap()->empty_symbol();
-}
-
-
-Object* Map::GetDescriptorContents(String* sentinel_name,
- bool* safe_to_add_transition) {
- // Get the cached index for the descriptors lookup, or find and cache it.
+MaybeObject* Map::GetElementsTransitionMap(ElementsKind elements_kind,
+ bool safe_to_add_transition) {
+ Heap* current_heap = heap();
DescriptorArray* descriptors = instance_descriptors();
- DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
- int index = cache->Lookup(descriptors, sentinel_name);
- if (index == DescriptorLookupCache::kAbsent) {
- index = descriptors->Search(sentinel_name);
- cache->Update(descriptors, sentinel_name, index);
- }
- // If the transition already exists, return its descriptor.
- if (index != DescriptorArray::kNotFound) {
- PropertyDetails details(descriptors->GetDetails(index));
- if (details.type() == ELEMENTS_TRANSITION) {
- return descriptors->GetValue(index);
- } else {
- *safe_to_add_transition = false;
- }
- }
- return NULL;
-}
-
-
-Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind,
- bool* safe_to_add_transition) {
- // Special case: indirect SMI->FAST transition (cf. comment in
- // AddElementsTransition()).
- if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
- elements_kind == FAST_ELEMENTS) {
- Map* double_map = this->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS,
- safe_to_add_transition);
- if (double_map == NULL) return double_map;
- return double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
- safe_to_add_transition);
- }
- Object* descriptor_contents = GetDescriptorContents(
- elements_transition_sentinel_name(), safe_to_add_transition);
- if (descriptor_contents != NULL) {
- Map* maybe_transition_map =
- GetElementsTransitionMapFromDescriptor(descriptor_contents,
- elements_kind);
- ASSERT(maybe_transition_map == NULL || maybe_transition_map->IsMap());
- return maybe_transition_map;
- }
- return NULL;
-}
-
-
-MaybeObject* Map::AddElementsTransition(ElementsKind elements_kind,
- Map* transitioned_map) {
- // The map transition graph should be a tree, therefore the transition
- // from SMI to FAST elements is not done directly, but by going through
- // DOUBLE elements first.
- if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
- elements_kind == FAST_ELEMENTS) {
- bool safe_to_add = true;
- Map* double_map = this->LookupElementsTransitionMap(
- FAST_DOUBLE_ELEMENTS, &safe_to_add);
- // This method is only called when safe_to_add_transition has been found
- // to be true earlier.
- ASSERT(safe_to_add);
-
- if (double_map == NULL) {
- MaybeObject* maybe_map = this->CopyDropTransitions();
- if (!maybe_map->To(&double_map)) return maybe_map;
- double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
- MaybeObject* maybe_double_transition = this->AddElementsTransition(
- FAST_DOUBLE_ELEMENTS, double_map);
- if (maybe_double_transition->IsFailure()) return maybe_double_transition;
- }
- return double_map->AddElementsTransition(FAST_ELEMENTS, transitioned_map);
- }
-
- bool safe_to_add_transition = true;
- Object* descriptor_contents = GetDescriptorContents(
- elements_transition_sentinel_name(), &safe_to_add_transition);
- // This method is only called when safe_to_add_transition has been found
- // to be true earlier.
- ASSERT(safe_to_add_transition);
- MaybeObject* maybe_new_contents =
- AddElementsTransitionMapToDescriptor(descriptor_contents,
- transitioned_map);
- Object* new_contents;
- if (!maybe_new_contents->ToObject(&new_contents)) {
- return maybe_new_contents;
- }
-
- ElementsTransitionDescriptor desc(elements_transition_sentinel_name(),
- new_contents);
- Object* new_descriptors;
- MaybeObject* maybe_new_descriptors =
- instance_descriptors()->CopyInsert(&desc, KEEP_TRANSITIONS);
- if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
- return maybe_new_descriptors;
- }
- set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- return this;
-}
-
-
-MaybeObject* JSObject::GetElementsTransitionMap(ElementsKind to_kind) {
- Map* current_map = map();
- ElementsKind from_kind = current_map->elements_kind();
-
- if (from_kind == to_kind) return current_map;
-
- // Only objects with FastProperties can have DescriptorArrays and can track
- // element-related maps. Also don't add descriptors to maps that are shared.
- bool safe_to_add_transition = HasFastProperties() &&
- !current_map->IsUndefined() &&
- !current_map->is_shared();
-
- // Prevent long chains of DICTIONARY -> FAST_ELEMENTS maps caused by objects
- // with elements that switch back and forth between dictionary and fast
- // element mode.
- if (from_kind == DICTIONARY_ELEMENTS && to_kind == FAST_ELEMENTS) {
- safe_to_add_transition = false;
- }
+ String* elements_transition_sentinel_name = current_heap->empty_symbol();
if (safe_to_add_transition) {
// It's only safe to manipulate the descriptor array if it would be
// safe to add a transition.
- Map* maybe_transition_map = current_map->LookupElementsTransitionMap(
- to_kind, &safe_to_add_transition);
- if (maybe_transition_map != NULL) {
- return maybe_transition_map;
+
+ ASSERT(!is_shared()); // no transitions can be added to shared maps.
+ // Check if the elements transition already exists.
+ DescriptorLookupCache* cache =
+ current_heap->isolate()->descriptor_lookup_cache();
+ int index = cache->Lookup(descriptors, elements_transition_sentinel_name);
+ if (index == DescriptorLookupCache::kAbsent) {
+ index = descriptors->Search(elements_transition_sentinel_name);
+ cache->Update(descriptors,
+ elements_transition_sentinel_name,
+ index);
+ }
+
+ // If the transition already exists, check the type. If there is a match,
+ // return it.
+ if (index != DescriptorArray::kNotFound) {
+ PropertyDetails details(PropertyDetails(descriptors->GetDetails(index)));
+ if (details.type() == ELEMENTS_TRANSITION &&
+ details.elements_kind() == elements_kind) {
+ return descriptors->GetValue(index);
+ } else {
+ safe_to_add_transition = false;
+ }
}
}
- Map* new_map = NULL;
-
// No transition to an existing map for the given ElementsKind. Make a new
// one.
- { MaybeObject* maybe_map = current_map->CopyDropTransitions();
- if (!maybe_map->To(&new_map)) return maybe_map;
+ Object* obj;
+ { MaybeObject* maybe_map = CopyDropTransitions();
+ if (!maybe_map->ToObject(&obj)) return maybe_map;
}
+ Map* new_map = Map::cast(obj);
- new_map->set_elements_kind(to_kind);
+ new_map->set_elements_kind(elements_kind);
+ GetIsolate()->counters()->map_to_external_array_elements()->Increment();
// Only remember the map transition if the object's map is NOT equal to the
// global object_function's map and there is not an already existing
// non-matching element transition.
- bool allow_map_transition = safe_to_add_transition &&
+ bool allow_map_transition =
+ safe_to_add_transition &&
(GetIsolate()->context()->global_context()->object_function()->map() !=
map());
if (allow_map_transition) {
- MaybeObject* maybe_transition =
- current_map->AddElementsTransition(to_kind, new_map);
- if (maybe_transition->IsFailure()) return maybe_transition;
+ // Allocate new instance descriptors for the old map with map transition.
+ ElementsTransitionDescriptor desc(elements_transition_sentinel_name,
+ Map::cast(new_map),
+ elements_kind);
+ Object* new_descriptors;
+ MaybeObject* maybe_new_descriptors = descriptors->CopyInsert(
+ &desc,
+ KEEP_TRANSITIONS);
+ if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
+ return maybe_new_descriptors;
+ }
+ descriptors = DescriptorArray::cast(new_descriptors);
+ set_instance_descriptors(descriptors);
}
+
return new_map;
}
@@ -2328,7 +2078,6 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
ASSERT(proto->IsJSGlobalObject());
- // A GlobalProxy's prototype should always be a proper JSObject.
return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
}
@@ -2455,7 +2204,7 @@ MaybeObject* JSReceiver::SetProperty(LookupResult* result,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
if (result->IsFound() && result->type() == HANDLER) {
- return result->proxy()->SetPropertyWithHandler(
+ return JSProxy::cast(this)->SetPropertyWithHandler(
key, value, attributes, strict_mode);
} else {
return JSObject::cast(this)->SetPropertyForResult(
@@ -2469,11 +2218,22 @@ bool JSProxy::HasPropertyWithHandler(String* name_raw) {
HandleScope scope(isolate);
Handle<Object> receiver(this);
Handle<Object> name(name_raw);
+ Handle<Object> handler(this->handler());
- Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
- "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
+ // Extract trap function.
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("has");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
if (isolate->has_pending_exception()) return Failure::Exception();
+ if (trap->IsUndefined()) {
+ trap = isolate->derived_has_trap();
+ }
+
+ // Call trap function.
+ Object** args[] = { name.location() };
+ bool has_exception;
+ Handle<Object> result =
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+ if (has_exception) return Failure::Exception();
return result->ToBoolean()->IsTrue();
}
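The rewritten proxy paths above all follow the same dance: read the trap off the handler by name, substitute the derived trap when the handler leaves it undefined, then call it and coerce the result. A minimal sketch of that dispatch, with the handler modeled as a plain map of callables instead of a JS object:

#include <functional>
#include <map>
#include <stdexcept>
#include <string>

typedef std::function<bool(const std::string&)> Trap;
typedef std::map<std::string, Trap> Handler;

// Prefer the trap installed on the handler, fall back to the derived trap
// when the handler does not define one, then invoke it.
bool HasPropertyWithHandlerModel(const Handler& handler,
                                 const Trap& derived_has_trap,
                                 const std::string& name) {
  Trap trap;
  Handler::const_iterator it = handler.find("has");
  if (it != handler.end()) {
    trap = it->second;
  } else {
    trap = derived_has_trap;  // e.g. a trap derived from "getPropertyDescriptor"
  }
  // No trap and no fallback: reject, as the "delete" path below does.
  if (!trap) throw std::runtime_error("handler trap missing");
  return trap(name);
}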
@@ -2489,82 +2249,24 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
Handle<Object> receiver(this);
Handle<Object> name(name_raw);
Handle<Object> value(value_raw);
+ Handle<Object> handler(this->handler());
- Handle<Object> args[] = { receiver, name, value };
- CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
-
- return *value;
-}
-
-
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandlerIfDefiningSetter(
- String* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool* found) {
- *found = true; // except where defined otherwise...
- Isolate* isolate = GetHeap()->isolate();
- Handle<JSProxy> proxy(this);
- Handle<String> name(name_raw);
- Handle<Object> value(value_raw);
- Handle<Object> args[] = { name };
- Handle<Object> result = proxy->CallTrap(
- "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
+ // Extract trap function.
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("set");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
if (isolate->has_pending_exception()) return Failure::Exception();
-
- if (!result->IsUndefined()) {
- // The proxy handler cares about this property.
- // Check whether it is virtualized as an accessor.
- // Emulate [[GetProperty]] semantics for proxies.
- bool has_pending_exception;
- Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
-
- Handle<String> conf_name =
- isolate->factory()->LookupAsciiSymbol("configurable_");
- Handle<Object> configurable(v8::internal::GetProperty(desc, conf_name));
- ASSERT(!isolate->has_pending_exception());
- if (configurable->IsFalse()) {
- Handle<Object> args[] = { Handle<Object>(proxy->handler()), proxy, name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
- ASSERT(configurable->IsTrue());
-
- // Check for AccessorDescriptor.
- Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
- Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
- ASSERT(!isolate->has_pending_exception());
- if (!setter->IsUndefined()) {
- // We have a setter -- invoke it.
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return proxy->SetPropertyWithDefinedSetter(
- JSReceiver::cast(*setter), *value);
- } else {
- Handle<String> get_name = isolate->factory()->LookupAsciiSymbol("get_");
- Handle<Object> getter(v8::internal::GetProperty(desc, get_name));
- ASSERT(!isolate->has_pending_exception());
- if (!getter->IsUndefined()) {
- // We have a getter but no setter -- the property may not be
- // written. In strict mode, throw an error.
- if (strict_mode == kNonStrictMode) return *value;
- Handle<Object> args[] = { name, proxy };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "no_setter_in_callback", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
- }
- }
- // Fall-through.
+ if (trap->IsUndefined()) {
+ trap = isolate->derived_set_trap();
}
- // The proxy does not define the property as an accessor.
- *found = false;
+ // Call trap function.
+ Object** args[] = {
+ receiver.location(), name.location(), value.location()
+ };
+ bool has_exception;
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+ if (has_exception) return Failure::Exception();
+
return *value;
}
@@ -2575,16 +2277,31 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
HandleScope scope(isolate);
Handle<Object> receiver(this);
Handle<Object> name(name_raw);
+ Handle<Object> handler(this->handler());
- Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
- "delete", Handle<Object>(), ARRAY_SIZE(args), args);
+ // Extract trap function.
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
if (isolate->has_pending_exception()) return Failure::Exception();
+ if (trap->IsUndefined()) {
+ Handle<Object> args[] = { handler, trap_name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Failure::Exception();
+ }
+
+ // Call trap function.
+ Object** args[] = { name.location() };
+ bool has_exception;
+ Handle<Object> result =
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+ if (has_exception) return Failure::Exception();
Object* bool_result = result->ToBoolean();
- if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
- Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
- Handle<Object> args[] = { Handle<Object>(handler()), trap_name };
+ if (mode == STRICT_DELETION &&
+ bool_result == isolate->heap()->false_value()) {
+ Handle<Object> args[] = { handler, trap_name };
Handle<Object> error = isolate->factory()->NewTypeError(
"handler_failed", HandleVector(args, ARRAY_SIZE(args)));
isolate->Throw(*error);
@@ -2594,73 +2311,39 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
}
-MUST_USE_RESULT MaybeObject* JSProxy::DeleteElementWithHandler(
- uint32_t index,
- DeleteMode mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- return JSProxy::DeletePropertyWithHandler(*name, mode);
-}
-
-
MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
JSReceiver* receiver_raw,
- String* name_raw) {
+ String* name_raw,
+ bool* has_exception) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
- Handle<JSProxy> proxy(this);
Handle<JSReceiver> receiver(receiver_raw);
Handle<Object> name(name_raw);
+ Handle<Object> handler(this->handler());
- Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
- "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return NONE;
-
- if (result->IsUndefined()) return ABSENT;
-
- bool has_pending_exception;
- Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return NONE;
-
- // Convert result to PropertyAttributes.
- Handle<String> enum_n = isolate->factory()->LookupAsciiSymbol("enumerable");
- Handle<Object> enumerable(v8::internal::GetProperty(desc, enum_n));
- if (isolate->has_pending_exception()) return NONE;
- Handle<String> conf_n = isolate->factory()->LookupAsciiSymbol("configurable");
- Handle<Object> configurable(v8::internal::GetProperty(desc, conf_n));
- if (isolate->has_pending_exception()) return NONE;
- Handle<String> writ_n = isolate->factory()->LookupAsciiSymbol("writable");
- Handle<Object> writable(v8::internal::GetProperty(desc, writ_n));
+ // Extract trap function.
+ Handle<String> trap_name =
+ isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
if (isolate->has_pending_exception()) return NONE;
-
- if (configurable->IsFalse()) {
- Handle<Object> args[] = { Handle<Object>(proxy->handler()), proxy, name };
+ if (trap->IsUndefined()) {
+ Handle<Object> args[] = { handler, trap_name };
Handle<Object> error = isolate->factory()->NewTypeError(
- "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
+ "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
isolate->Throw(*error);
+ *has_exception = true;
return NONE;
}
- int attributes = NONE;
- if (enumerable->ToBoolean()->IsFalse()) attributes |= DONT_ENUM;
- if (configurable->ToBoolean()->IsFalse()) attributes |= DONT_DELETE;
- if (writable->ToBoolean()->IsFalse()) attributes |= READ_ONLY;
- return static_cast<PropertyAttributes>(attributes);
-}
-
+ // Call trap function.
+ Object** args[] = { name.location() };
+ Handle<Object> result =
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, has_exception);
+ if (has_exception) return NONE;
-MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
- JSReceiver* receiver,
- uint32_t index) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- return GetPropertyAttributeWithHandler(receiver, *name);
+ // TODO(rossberg): convert result to PropertyAttributes
+ USE(result);
+ return NONE;
}
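The TODO above leaves the descriptor-to-attributes conversion unimplemented and returns NONE unconditionally, while the removed code derived the attributes from the enumerable/configurable/writable flags. A standalone sketch of that mapping; the bit values are written out here as assumptions rather than taken from the real PropertyAttributes enum:

#include <cstdint>

enum PropertyAttributeBits {
  kNoneBit       = 0,
  kReadOnlyBit   = 1 << 0,
  kDontEnumBit   = 1 << 1,
  kDontDeleteBit = 1 << 2
};

// Absence of each descriptor flag maps to the corresponding attribute bit.
uint32_t DescriptorToAttributes(bool enumerable, bool configurable,
                                bool writable) {
  uint32_t attributes = kNoneBit;
  if (!enumerable)   attributes |= kDontEnumBit;
  if (!configurable) attributes |= kDontDeleteBit;
  if (!writable)     attributes |= kReadOnlyBit;
  return attributes;
}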
@@ -2669,9 +2352,6 @@ void JSProxy::Fix() {
HandleScope scope(isolate);
Handle<JSProxy> self(this);
- // Save identity hash.
- MaybeObject* maybe_hash = GetIdentityHash(OMIT_CREATION);
-
if (IsJSFunctionProxy()) {
isolate->factory()->BecomeJSFunction(self);
// Code will be set on the JavaScript side.
@@ -2679,42 +2359,9 @@ void JSProxy::Fix() {
isolate->factory()->BecomeJSObject(self);
}
ASSERT(self->IsJSObject());
-
- // Inherit identity, if it was present.
- Object* hash;
- if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
- Handle<JSObject> new_self(JSObject::cast(*self));
- isolate->factory()->SetIdentityHash(new_self, hash);
- }
}
-MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
- Handle<Object> derived,
- int argc,
- Handle<Object> argv[]) {
- Isolate* isolate = GetIsolate();
- Handle<Object> handler(this->handler());
-
- Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol(name);
- Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
- if (isolate->has_pending_exception()) return trap;
-
- if (trap->IsUndefined()) {
- if (derived.is_null()) {
- Handle<Object> args[] = { handler, trap_name };
- Handle<Object> error = isolate->factory()->NewTypeError(
- "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
- isolate->Throw(*error);
- return Handle<Object>();
- }
- trap = Handle<Object>(derived);
- }
-
- bool threw;
- return Execution::Call(trap, handler, argc, argv, &threw);
-}
-
MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
String* name,
@@ -2739,46 +2386,48 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
}
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(
- result, name, value, true, strict_mode);
- }
+ if (IsAccessCheckNeeded()
+ && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(result,
+ name,
+ value,
+ true,
+ strict_mode);
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetPropertyForResult(
+ return JSObject::cast(proto)->SetProperty(
result, name, value, attributes, strict_mode);
}
if (!result->IsProperty() && !IsJSContextExtensionObject()) {
- bool found = false;
- MaybeObject* result_object;
- result_object = SetPropertyWithCallbackSetterInPrototypes(name,
- value,
- attributes,
- &found,
- strict_mode);
- if (found) return result_object;
+ // We could not find a local property so let's check whether there is an
+ // accessor that wants to handle the property.
+ LookupResult accessor_result;
+ LookupCallbackSetterInPrototypes(name, &accessor_result);
+ if (accessor_result.IsProperty()) {
+ return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
+ name,
+ value,
+ accessor_result.holder(),
+ strict_mode);
+ }
}
-
- // At this point, no GC should have happened, as this would invalidate
- // 'result', which we cannot handlify!
-
if (!result->IsFound()) {
// Neither properties nor transitions found.
return AddProperty(name, value, attributes, strict_mode);
}
if (result->IsReadOnly() && result->IsProperty()) {
if (strict_mode == kStrictMode) {
- Handle<JSObject> self(this);
- Handle<String> hname(name);
- Handle<Object> args[] = { hname, self };
+ HandleScope scope(heap->isolate());
+ Handle<String> key(name);
+ Handle<Object> holder(this);
+ Handle<Object> args[2] = { key, holder };
return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
+ "strict_read_only_property", HandleVector(args, 2)));
} else {
return value;
}
@@ -3007,8 +2656,9 @@ PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
String* key) {
uint32_t index = 0;
if (IsJSObject() && key->AsArrayIndex(&index)) {
- return JSObject::cast(this)->HasElementWithReceiver(receiver, index)
- ? NONE : ABSENT;
+ if (JSObject::cast(this)->HasElementWithReceiver(receiver, index))
+ return NONE;
+ return ABSENT;
}
// Named property.
LookupResult result;
@@ -3038,8 +2688,10 @@ PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver,
case CALLBACKS:
return result->GetAttributes();
case HANDLER: {
- return JSProxy::cast(result->proxy())->GetPropertyAttributeWithHandler(
- receiver, name);
+ // TODO(rossberg): propagate exceptions properly.
+ bool has_exception = false;
+ return JSProxy::cast(this)->GetPropertyAttributeWithHandler(
+ receiver, name, &has_exception);
}
case INTERCEPTOR:
return result->holder()->GetPropertyAttributeWithInterceptor(
@@ -3205,7 +2857,7 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
}
}
- Heap* current_heap = GetHeap();
+ Heap* current_heap = map_of_this->heap();
// Copy the next enumeration index from instance descriptor.
int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
@@ -3227,10 +2879,6 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
ASSERT(instance_size_delta >= 0);
current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
instance_size_delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytes(this->address(), -instance_size_delta);
- }
-
set_map(new_map);
new_map->clear_instance_descriptors();
@@ -3264,14 +2912,13 @@ MaybeObject* JSObject::NormalizeElements() {
FixedArrayBase* array = FixedArrayBase::cast(elements());
Map* old_map = array->map();
bool is_arguments =
- (old_map == old_map->GetHeap()->non_strict_arguments_elements_map());
+ (old_map == old_map->heap()->non_strict_arguments_elements_map());
if (is_arguments) {
array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
}
if (array->IsDictionary()) return array;
ASSERT(HasFastElements() ||
- HasFastSmiOnlyElements() ||
HasFastDoubleElements() ||
HasFastArgumentsElements());
// Compute the effective length and allocate a new backing store.
@@ -3306,8 +2953,7 @@ MaybeObject* JSObject::NormalizeElements() {
if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
}
} else {
- ASSERT(old_map->has_fast_elements() ||
- old_map->has_fast_smi_only_elements());
+ ASSERT(old_map->has_fast_elements());
value = FixedArray::cast(array)->get(i);
}
PropertyDetails details = PropertyDetails(NONE, NORMAL);
@@ -3327,14 +2973,13 @@ MaybeObject* JSObject::NormalizeElements() {
// Set the new map first to satify the elements type assert in
// set_elements().
Object* new_map;
- MaybeObject* maybe = GetElementsTransitionMap(DICTIONARY_ELEMENTS);
+ MaybeObject* maybe = map()->GetSlowElementsMap();
if (!maybe->ToObject(&new_map)) return maybe;
set_map(Map::cast(new_map));
set_elements(dictionary);
}
- old_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
- Increment();
+ old_map->isolate()->counters()->elements_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -3348,219 +2993,95 @@ MaybeObject* JSObject::NormalizeElements() {
}
-Smi* JSReceiver::GenerateIdentityHash() {
+MaybeObject* JSObject::GetHiddenProperties(HiddenPropertiesFlag flag) {
Isolate* isolate = GetIsolate();
-
- int hash_value;
- int attempts = 0;
- do {
- // Generate a random 32-bit hash value but limit range to fit
- // within a smi.
- hash_value = V8::Random(isolate) & Smi::kMaxValue;
- attempts++;
- } while (hash_value == 0 && attempts < 30);
- hash_value = hash_value != 0 ? hash_value : 1; // never return 0
-
- return Smi::FromInt(hash_value);
-}
-
-
-MaybeObject* JSObject::SetIdentityHash(Object* hash, CreationFlag flag) {
- MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
- hash);
- if (maybe->IsFailure()) return maybe;
- return this;
-}
-
-
-MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
- Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_symbol());
- if (stored_value->IsSmi()) return stored_value;
-
- Smi* hash = GenerateIdentityHash();
- MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
- hash);
- if (result->IsFailure()) return result;
- if (result->ToObjectUnchecked()->IsUndefined()) {
- // Trying to get hash of detached proxy.
- return Smi::FromInt(0);
- }
- return hash;
-}
-
-
-MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
- Object* hash = this->hash();
- if (!hash->IsSmi() && flag == ALLOW_CREATION) {
- hash = GenerateIdentityHash();
- set_hash(hash);
- }
- return hash;
-}
-
-
-Object* JSObject::GetHiddenProperty(String* key) {
- if (IsJSGlobalProxy()) {
- // For a proxy, use the prototype as target object.
- Object* proxy_parent = GetPrototype();
- // If the proxy is detached, return undefined.
- if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
- ASSERT(proxy_parent->IsJSGlobalObject());
- return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
- }
- ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
- ASSERT(!hidden_lookup->IsFailure()); // No failure when passing false as arg.
- if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) {
- return GetHeap()->undefined_value();
- }
- StringDictionary* dictionary =
- StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
- int entry = dictionary->FindEntry(key);
- if (entry == StringDictionary::kNotFound) return GetHeap()->undefined_value();
- return dictionary->ValueAt(entry);
-}
-
-
-MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
- if (IsJSGlobalProxy()) {
- // For a proxy, use the prototype as target object.
- Object* proxy_parent = GetPrototype();
- // If the proxy is detached, return undefined.
- if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
- ASSERT(proxy_parent->IsJSGlobalObject());
- return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
- }
- ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(true);
- StringDictionary* dictionary;
- if (!hidden_lookup->To<StringDictionary>(&dictionary)) return hidden_lookup;
-
- // If it was found, check if the key is already in the dictionary.
- int entry = dictionary->FindEntry(key);
- if (entry != StringDictionary::kNotFound) {
- // If key was found, just update the value.
- dictionary->ValueAtPut(entry, value);
- return this;
- }
- // Key was not already in the dictionary, so add the entry.
- MaybeObject* insert_result = dictionary->Add(key,
- value,
- PropertyDetails(NONE, NORMAL));
- StringDictionary* new_dict;
- if (!insert_result->To<StringDictionary>(&new_dict)) return insert_result;
- if (new_dict != dictionary) {
- // If adding the key expanded the dictionary (i.e., Add returned a new
- // dictionary), store it back to the object.
- MaybeObject* store_result = SetHiddenPropertiesDictionary(new_dict);
- if (store_result->IsFailure()) return store_result;
- }
- // Return this to mark success.
- return this;
-}
-
-
-void JSObject::DeleteHiddenProperty(String* key) {
- if (IsJSGlobalProxy()) {
- // For a proxy, use the prototype as target object.
- Object* proxy_parent = GetPrototype();
- // If the proxy is detached, return immediately.
- if (proxy_parent->IsNull()) return;
- ASSERT(proxy_parent->IsJSGlobalObject());
- JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
- return;
- }
- MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
- ASSERT(!hidden_lookup->IsFailure()); // No failure when passing false as arg.
- if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) return;
- StringDictionary* dictionary =
- StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
- int entry = dictionary->FindEntry(key);
- if (entry == StringDictionary::kNotFound) {
- // Key wasn't in dictionary. Deletion is a success.
- return;
- }
- // Key was in the dictionary. Remove it.
- dictionary->DeleteProperty(entry, JSReceiver::FORCE_DELETION);
-}
-
-
-bool JSObject::HasHiddenProperties() {
- return GetPropertyAttributePostInterceptor(this,
- GetHeap()->hidden_symbol(),
- false) != ABSENT;
-}
-
-
-MaybeObject* JSObject::GetHiddenPropertiesDictionary(bool create_if_absent) {
- ASSERT(!IsJSGlobalProxy());
- if (HasFastProperties()) {
+ Heap* heap = isolate->heap();
+ Object* holder = BypassGlobalProxy();
+ if (holder->IsUndefined()) return heap->undefined_value();
+ JSObject* obj = JSObject::cast(holder);
+ if (obj->HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden symbol. Since the
// hidden symbols hash code is zero (and no other string has hash
// code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = this->map()->instance_descriptors();
+ DescriptorArray* descriptors = obj->map()->instance_descriptors();
if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == GetHeap()->hidden_symbol()) &&
+ (descriptors->GetKey(0) == heap->hidden_symbol()) &&
descriptors->IsProperty(0)) {
ASSERT(descriptors->GetType(0) == FIELD);
- Object* hidden_store =
- this->FastPropertyAt(descriptors->GetFieldIndex(0));
- return StringDictionary::cast(hidden_store);
+ return obj->FastPropertyAt(descriptors->GetFieldIndex(0));
}
- } else {
- PropertyAttributes attributes;
- // You can't install a getter on a property indexed by the hidden symbol,
- // so we can be sure that GetLocalPropertyPostInterceptor returns a real
- // object.
- Object* lookup =
- GetLocalPropertyPostInterceptor(this,
- GetHeap()->hidden_symbol(),
- &attributes)->ToObjectUnchecked();
- if (!lookup->IsUndefined()) {
- return StringDictionary::cast(lookup);
- }
- }
- if (!create_if_absent) return GetHeap()->undefined_value();
- const int kInitialSize = 5;
- MaybeObject* dict_alloc = StringDictionary::Allocate(kInitialSize);
- StringDictionary* dictionary;
- if (!dict_alloc->To<StringDictionary>(&dictionary)) return dict_alloc;
- MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
- dictionary,
- DONT_ENUM,
- kNonStrictMode);
- if (store_result->IsFailure()) return store_result;
- return dictionary;
+ }
+
+ // Only attempt to find the hidden properties in the local object and not
+ // in the prototype chain.
+ if (!obj->HasHiddenPropertiesObject()) {
+ // Hidden properties object not found. Allocate a new hidden properties
+ // object if requested. Otherwise return the undefined value.
+ if (flag == ALLOW_CREATION) {
+ Object* hidden_obj;
+ { MaybeObject* maybe_obj = heap->AllocateJSObject(
+ isolate->context()->global_context()->object_function());
+ if (!maybe_obj->ToObject(&hidden_obj)) return maybe_obj;
+ }
+ // Don't allow leakage of the hidden object through accessors
+ // on Object.prototype.
+ {
+ MaybeObject* maybe_obj =
+ JSObject::cast(hidden_obj)->SetPrototype(heap->null_value(), false);
+ if (maybe_obj->IsFailure()) return maybe_obj;
+ }
+ return obj->SetHiddenPropertiesObject(hidden_obj);
+ } else {
+ return heap->undefined_value();
+ }
+ }
+ return obj->GetHiddenPropertiesObject();
}
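The fast-properties branch above leans on one invariant: descriptor arrays are ordered by the key's hash, and the hidden symbol is the only string whose hash is zero, so if it is present at all it occupies slot 0. A small model of that constant-time check; hash values are supplied explicitly here instead of being computed the way V8 hashes strings:

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

struct Desc { uint32_t hash; std::string key; int field_index; };

// The fast path is only valid while descriptors stay ordered by hash.
bool SortedByHash(const std::vector<Desc>& descriptors) {
  for (size_t i = 1; i < descriptors.size(); ++i) {
    if (descriptors[i - 1].hash > descriptors[i].hash) return false;
  }
  return true;
}

// O(1) membership test: a zero-hash key can only ever sit in slot 0.
bool HasHiddenSlot(const std::vector<Desc>& descriptors,
                   const std::string& hidden_key) {
  return !descriptors.empty() &&
         descriptors.front().hash == 0 &&
         descriptors.front().key == hidden_key;
}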
-MaybeObject* JSObject::SetHiddenPropertiesDictionary(
- StringDictionary* dictionary) {
- ASSERT(!IsJSGlobalProxy());
- ASSERT(HasHiddenProperties());
- if (HasFastProperties()) {
- // If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden symbol. Since the
- // hidden symbols hash code is zero (and no other string has hash
- // code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = this->map()->instance_descriptors();
- if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == GetHeap()->hidden_symbol()) &&
- descriptors->IsProperty(0)) {
- ASSERT(descriptors->GetType(0) == FIELD);
- this->FastPropertyAtPut(descriptors->GetFieldIndex(0), dictionary);
- return this;
+MaybeObject* JSObject::GetIdentityHash(HiddenPropertiesFlag flag) {
+ Isolate* isolate = GetIsolate();
+ Object* hidden_props_obj;
+ { MaybeObject* maybe_obj = GetHiddenProperties(flag);
+ if (!maybe_obj->ToObject(&hidden_props_obj)) return maybe_obj;
+ }
+ if (!hidden_props_obj->IsJSObject()) {
+ // We failed to create hidden properties. That's a detached
+ // global proxy.
+ ASSERT(hidden_props_obj->IsUndefined());
+ return Smi::FromInt(0);
+ }
+ JSObject* hidden_props = JSObject::cast(hidden_props_obj);
+ String* hash_symbol = isolate->heap()->identity_hash_symbol();
+ {
+ // Note that HasLocalProperty() can cause a GC in the general case in the
+ // presence of interceptors.
+ AssertNoAllocation no_alloc;
+ if (hidden_props->HasLocalProperty(hash_symbol)) {
+ MaybeObject* hash = hidden_props->GetProperty(hash_symbol);
+ return Smi::cast(hash->ToObjectChecked());
}
}
- MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
- dictionary,
- DONT_ENUM,
- kNonStrictMode);
- if (store_result->IsFailure()) return store_result;
- return this;
+
+ int hash_value;
+ int attempts = 0;
+ do {
+ // Generate a random 32-bit hash value but limit range to fit
+ // within a smi.
+ hash_value = V8::Random(isolate) & Smi::kMaxValue;
+ attempts++;
+ } while (hash_value == 0 && attempts < 30);
+ hash_value = hash_value != 0 ? hash_value : 1; // never return 0
+
+ Smi* hash = Smi::FromInt(hash_value);
+ { MaybeObject* result = hidden_props->SetLocalPropertyIgnoreAttributes(
+ hash_symbol,
+ hash,
+ static_cast<PropertyAttributes>(None));
+ if (result->IsFailure()) return result;
+ }
+ return hash;
}
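The tail of GetIdentityHash above regenerates the hash with a bounded retry loop so that 0 is never handed out. A self-contained sketch of that loop; the stand-in for Smi::kMaxValue assumes 31-bit smis (2^30 - 1), which is an assumption of this model rather than a quote of the constant:

#include <cstdint>
#include <random>

int32_t GenerateIdentityHashModel(std::mt19937& rng) {
  const uint32_t kSmiMaxValueModel = (1u << 30) - 1;  // assumed smi range
  int32_t hash_value = 0;
  int attempts = 0;
  do {
    // Draw a random 32-bit value and mask it into the non-negative smi range.
    hash_value = static_cast<int32_t>(rng() & kSmiMaxValueModel);
    attempts++;
  } while (hash_value == 0 && attempts < 30);
  return hash_value != 0 ? hash_value : 1;  // never return 0
}

// Usage: std::mt19937 rng(std::random_device{}()); int32_t h = GenerateIdentityHashModel(rng);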
@@ -3680,16 +3201,9 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
if (IsJSProxy()) {
return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
+ } else {
+ return JSObject::cast(this)->DeleteProperty(name, mode);
}
- return JSObject::cast(this)->DeleteProperty(name, mode);
-}
-
-
-MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->DeleteElementWithHandler(index, mode);
- }
- return JSObject::cast(this)->DeleteElement(index, mode);
}
@@ -3753,8 +3267,7 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
Object* object) {
- ASSERT(kind == FAST_ELEMENTS ||
- kind == DICTIONARY_ELEMENTS);
+ ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
if (kind == FAST_ELEMENTS) {
int length = IsJSArray()
? Smi::cast(JSArray::cast(this)->length())->value()
@@ -3774,7 +3287,7 @@ bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
// Check whether this object references another object.
bool JSObject::ReferencesObject(Object* obj) {
Map* map_of_this = map();
- Heap* heap = GetHeap();
+ Heap* heap = map_of_this->heap();
AssertNoAllocation no_alloc;
// Is the object the constructor for this object?
@@ -3809,8 +3322,6 @@ bool JSObject::ReferencesObject(Object* obj) {
// Raw pixels and external arrays do not reference other
// objects.
break;
- case FAST_SMI_ONLY_ELEMENTS:
- break;
case FAST_ELEMENTS:
case DICTIONARY_ELEMENTS: {
FixedArray* elements = FixedArray::cast(this->elements());
@@ -3998,6 +3509,15 @@ AccessorDescriptor* Map::FindAccessor(String* name) {
void JSReceiver::LocalLookup(String* name, LookupResult* result) {
+ if (IsJSProxy()) {
+ result->HandlerResult();
+ } else {
+ JSObject::cast(this)->LocalLookup(name, result);
+ }
+}
+
+
+void JSObject::LocalLookup(String* name, LookupResult* result) {
ASSERT(name->IsString());
Heap* heap = GetHeap();
@@ -4006,36 +3526,28 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result) {
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
ASSERT(proto->IsJSGlobalObject());
- return JSReceiver::cast(proto)->LocalLookup(name, result);
- }
-
- if (IsJSProxy()) {
- result->HandlerResult(JSProxy::cast(this));
- return;
+ return JSObject::cast(proto)->LocalLookup(name, result);
}
// Do not use inline caching if the object is a non-global object
// that requires access checks.
- if (IsAccessCheckNeeded()) {
+ if (!IsJSGlobalProxy() && IsAccessCheckNeeded()) {
result->DisallowCaching();
}
- JSObject* js_object = JSObject::cast(this);
-
// Check __proto__ before interceptor.
if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) {
- result->ConstantResult(js_object);
+ result->ConstantResult(this);
return;
}
// Check for lookup interceptor except when bootstrapping.
- if (js_object->HasNamedInterceptor() &&
- !heap->isolate()->bootstrapper()->IsActive()) {
- result->InterceptorResult(js_object);
+ if (HasNamedInterceptor() && !heap->isolate()->bootstrapper()->IsActive()) {
+ result->InterceptorResult(this);
return;
}
- js_object->LocalLookupRealNamedProperty(name, result);
+ LocalLookupRealNamedProperty(name, result);
}
@@ -4045,7 +3557,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) {
for (Object* current = this;
current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
- JSReceiver::cast(current)->LocalLookup(name, result);
+ JSObject::cast(current)->LocalLookup(name, result);
if (result->IsProperty()) return;
}
result->NotFound();
@@ -4056,7 +3568,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) {
void JSObject::LookupCallback(String* name, LookupResult* result) {
Heap* heap = GetHeap();
for (Object* current = this;
- current != heap->null_value() && current->IsJSObject();
+ current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
if (result->IsProperty() && result->type() == CALLBACKS) return;
@@ -4102,7 +3614,6 @@ MaybeObject* JSObject::DefineGetterSetter(String* name,
if (is_element) {
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
break;
@@ -4289,7 +3800,7 @@ MaybeObject* JSObject::DefineAccessor(String* name,
bool is_getter,
Object* fun,
PropertyAttributes attributes) {
- ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
+ ASSERT(fun->IsJSFunction() || fun->IsUndefined());
Isolate* isolate = GetIsolate();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@@ -4352,7 +3863,6 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
// Accessors overwrite previous callbacks (cf. with getters/setters).
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
break;
@@ -4576,7 +4086,7 @@ MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
// Allocate the code cache if not present.
if (code_cache()->IsFixedArray()) {
Object* result;
- { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache();
+ { MaybeObject* maybe_result = code->heap()->AllocateCodeCache();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
set_code_cache(result);
@@ -4618,7 +4128,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
// Traverse the transition tree without using a stack. We do this by
// reversing the pointers in the maps and descriptor arrays.
Map* current = this;
- Map* meta_map = GetHeap()->meta_map();
+ Map* meta_map = heap()->meta_map();
Object** map_or_index_field = NULL;
while (current != meta_map) {
DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
@@ -4639,7 +4149,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
// of the next map and recording the index in the transition array in
// the map field of the array.
Map* next = Map::cast(contents->get(i));
- next->set_map_unsafe(current);
+ next->set_map(current);
*map_or_index_field = Smi::FromInt(i + 2);
current = next;
map_done = false;
@@ -4664,23 +4174,23 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
Object* perhaps_map = prototype_transitions->get(i);
if (perhaps_map->IsMap()) {
Map* next = Map::cast(perhaps_map);
- next->set_map_unsafe(current);
+ next->set_map(current);
*proto_map_or_index_field =
Smi::FromInt(i + kProtoTransitionElementsPerEntry);
current = next;
continue;
}
}
- *proto_map_or_index_field = GetHeap()->fixed_array_map();
+ *proto_map_or_index_field = heap()->fixed_array_map();
if (map_or_index_field != NULL) {
- *map_or_index_field = GetHeap()->fixed_array_map();
+ *map_or_index_field = heap()->fixed_array_map();
}
// The callback expects a map to have a real map as its map, so we save
// the map field, which is being used to track the traversal and put the
// correct map (the meta_map) in place while we do the callback.
Map* prev = current->map();
- current->set_map_unsafe(meta_map);
+ current->set_map(meta_map);
callback(current, data);
current = prev;
}
@@ -4896,7 +4406,7 @@ class CodeCacheHashTableKey : public HashTableKey {
MUST_USE_RESULT MaybeObject* AsObject() {
ASSERT(code_ != NULL);
Object* obj;
- { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2);
+ { MaybeObject* maybe_obj = code_->heap()->AllocateFixedArray(2);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* pair = FixedArray::cast(obj);
@@ -6485,7 +5995,7 @@ bool String::MarkAsUndetectable() {
if (StringShape(this).IsSymbol()) return false;
Map* map = this->map();
- Heap* heap = GetHeap();
+ Heap* heap = map->heap();
if (map == heap->string_map()) {
this->set_map(heap->undetectable_string_map());
return true;
@@ -6688,45 +6198,29 @@ void String::PrintOn(FILE* file) {
}
-void Map::CreateOneBackPointer(Map* target) {
-#ifdef DEBUG
- // Verify target.
- Object* source_prototype = prototype();
- Object* target_prototype = target->prototype();
- ASSERT(source_prototype->IsJSReceiver() ||
- source_prototype->IsMap() ||
- source_prototype->IsNull());
- ASSERT(target_prototype->IsJSReceiver() ||
- target_prototype->IsNull());
- ASSERT(source_prototype->IsMap() ||
- source_prototype == target_prototype);
-#endif
- // Point target back to source. set_prototype() will not let us set
- // the prototype to a map, as we do here.
- *RawField(target, kPrototypeOffset) = this;
-}
-
-
void Map::CreateBackPointers() {
DescriptorArray* descriptors = instance_descriptors();
for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
if (descriptors->GetType(i) == MAP_TRANSITION ||
descriptors->GetType(i) == ELEMENTS_TRANSITION ||
descriptors->GetType(i) == CONSTANT_TRANSITION) {
- Object* object = reinterpret_cast<Object*>(descriptors->GetValue(i));
- if (object->IsMap()) {
- CreateOneBackPointer(reinterpret_cast<Map*>(object));
- } else {
- ASSERT(object->IsFixedArray());
- ASSERT(descriptors->GetType(i) == ELEMENTS_TRANSITION);
- FixedArray* array = reinterpret_cast<FixedArray*>(object);
- for (int i = 0; i < array->length(); ++i) {
- Map* target = reinterpret_cast<Map*>(array->get(i));
- if (!target->IsUndefined()) {
- CreateOneBackPointer(target);
- }
- }
- }
+ // Get target.
+ Map* target = Map::cast(descriptors->GetValue(i));
+#ifdef DEBUG
+ // Verify target.
+ Object* source_prototype = prototype();
+ Object* target_prototype = target->prototype();
+ ASSERT(source_prototype->IsJSObject() ||
+ source_prototype->IsMap() ||
+ source_prototype->IsNull());
+ ASSERT(target_prototype->IsJSObject() ||
+ target_prototype->IsNull());
+ ASSERT(source_prototype->IsMap() ||
+ source_prototype == target_prototype);
+#endif
+ // Point target back to source. set_prototype() will not let us set
+ // the prototype to a map, as we do here.
+ *RawField(target, kPrototypeOffset) = this;
}
}
}
@@ -6753,46 +6247,16 @@ void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
if (details.type() == MAP_TRANSITION ||
details.type() == ELEMENTS_TRANSITION ||
details.type() == CONSTANT_TRANSITION) {
- Object* object = reinterpret_cast<Object*>(contents->get(i));
- if (object->IsMap()) {
- Map* target = reinterpret_cast<Map*>(object);
- ASSERT(target->IsHeapObject());
- MarkBit map_mark = Marking::MarkBitFrom(target);
- if (!map_mark.Get()) {
- ASSERT(target->IsMap());
- contents->set_unchecked(i + 1, NullDescriptorDetails);
- contents->set_null_unchecked(heap, i);
- ASSERT(target->prototype() == this ||
- target->prototype() == real_prototype);
- // Getter prototype() is read-only, set_prototype() has side effects.
- *RawField(target, Map::kPrototypeOffset) = real_prototype;
- }
- } else {
- ASSERT(object->IsFixedArray());
- ASSERT(details.type() == ELEMENTS_TRANSITION);
- FixedArray* array = reinterpret_cast<FixedArray*>(object);
- bool reachable_map_found = false;
- for (int j = 0; j < array->length(); ++j) {
- Map* target = reinterpret_cast<Map*>(array->get(j));
- ASSERT(target->IsHeapObject());
- MarkBit map_mark = Marking::MarkBitFrom(target);
- if (!map_mark.Get()) {
- ASSERT(target->IsMap());
- array->set_undefined(j);
- ASSERT(target->prototype() == this ||
- target->prototype() == real_prototype);
- // Getter prototype() is read-only, set_prototype() has side
- // effects.
- *RawField(target, Map::kPrototypeOffset) = real_prototype;
- } else if (target->IsMap()) {
- reachable_map_found = true;
- }
- }
- // If no map was found, make sure the FixedArray also gets collected.
- if (!reachable_map_found) {
- contents->set_unchecked(i + 1, NullDescriptorDetails);
- contents->set_null_unchecked(heap, i);
- }
+ Map* target = reinterpret_cast<Map*>(contents->get(i));
+ ASSERT(target->IsHeapObject());
+ if (!target->IsMarked()) {
+ ASSERT(target->IsMap());
+ contents->set_unchecked(i + 1, NullDescriptorDetails);
+ contents->set_null_unchecked(heap, i);
+ ASSERT(target->prototype() == this ||
+ target->prototype() == real_prototype);
+ // Getter prototype() is read-only, set_prototype() has side effects.
+ *RawField(target, Map::kPrototypeOffset) = real_prototype;
}
}
}
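ClearNonLiveTransitions, restored above to its pre-FixedArray form, walks the transition entries and clears any whose target map the collector did not mark. A toy model of that sweep, with the mark bit and the null-descriptor sentinel reduced to plain fields:

#include <cstddef>
#include <string>
#include <vector>

struct MapModel { bool marked_live; };

struct TransitionEntry {
  std::string name;
  MapModel* target;  // NULL stands in for the cleared descriptor entry
};

// Drop every transition whose target map was not marked live so the dead
// map (and everything reachable only through it) can be reclaimed.
void ClearNonLiveTransitionsModel(std::vector<TransitionEntry>* transitions) {
  for (size_t i = 0; i < transitions->size(); ++i) {
    TransitionEntry& entry = (*transitions)[i];
    if (entry.target != NULL && !entry.target->marked_live) {
      entry.target = NULL;
    }
  }
}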
@@ -6898,7 +6362,7 @@ MaybeObject* JSFunction::SetPrototype(Object* value) {
if (!maybe_new_map->ToObject(&new_object)) return maybe_new_map;
}
Map* new_map = Map::cast(new_object);
- Heap* heap = new_map->GetHeap();
+ Heap* heap = new_map->heap();
set_map(new_map);
new_map->set_constructor(value);
new_map->set_non_instance_prototype(true);
@@ -6929,7 +6393,7 @@ Object* JSFunction::RemovePrototype() {
ASSERT(shared()->strict_mode() || map() == global_context->function_map());
set_map(no_prototype_map);
- set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
+ set_prototype_or_initial_map(no_prototype_map->heap()->the_hole_value());
return this;
}
@@ -7222,8 +6686,6 @@ bool SharedFunctionInfo::VerifyBailoutId(int id) {
void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
ASSERT(!IsInobjectSlackTrackingInProgress());
- if (!FLAG_clever_optimizations) return;
-
// Only initiate the tracking the first time.
if (live_objects_may_exist()) return;
set_live_objects_may_exist(true);
@@ -7239,7 +6701,7 @@ void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
set_construction_count(kGenerousAllocationCount);
}
set_initial_map(map);
- Builtins* builtins = map->GetHeap()->isolate()->builtins();
+ Builtins* builtins = map->heap()->isolate()->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
construct_stub());
set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
@@ -7259,9 +6721,8 @@ void SharedFunctionInfo::DetachInitialMap() {
// then StartInobjectTracking will be called again the next time the
// constructor is called. The countdown will continue and (possibly after
// several more GCs) CompleteInobjectSlackTracking will eventually be called.
- Heap* heap = map->GetHeap();
- set_initial_map(heap->raw_unchecked_undefined_value());
- Builtins* builtins = heap->isolate()->builtins();
+ set_initial_map(map->heap()->raw_unchecked_undefined_value());
+ Builtins* builtins = map->heap()->isolate()->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
*RawField(this, kConstructStubOffset));
set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
@@ -7277,7 +6738,7 @@ void SharedFunctionInfo::AttachInitialMap(Map* map) {
// Resume inobject slack tracking.
set_initial_map(map);
- Builtins* builtins = map->GetHeap()->isolate()->builtins();
+ Builtins* builtins = map->heap()->isolate()->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
*RawField(this, kConstructStubOffset));
set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
@@ -7309,7 +6770,7 @@ void SharedFunctionInfo::CompleteInobjectSlackTracking() {
ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
Map* map = Map::cast(initial_map());
- Heap* heap = map->GetHeap();
+ Heap* heap = map->heap();
set_initial_map(heap->undefined_value());
Builtins* builtins = heap->isolate()->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
@@ -7371,14 +6832,8 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
}
-void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- VisitPointer(rinfo->target_object_address());
-}
-
-
void Code::InvalidateRelocation() {
- set_relocation_info(GetHeap()->empty_byte_array());
+ set_relocation_info(heap()->empty_byte_array());
}
@@ -7412,7 +6867,7 @@ void Code::CopyFrom(const CodeDesc& desc) {
Handle<Object> p = it.rinfo()->target_object_handle(origin);
it.rinfo()->set_target_object(*p);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+ Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
it.rinfo()->set_target_cell(*cell);
} else if (RelocInfo::IsCodeTarget(mode)) {
// rewrite code handles in inline cache targets to direct
@@ -7815,10 +7270,8 @@ static void CopySlowElementsToFast(NumberDictionary* source,
}
-MaybeObject* JSObject::SetFastElementsCapacityAndLength(
- int capacity,
- int length,
- SetFastElementsCapacityMode set_capacity_mode) {
+MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
+ int length) {
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
@@ -7835,24 +7288,15 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
Map* new_map = NULL;
if (elements()->map() != heap->non_strict_arguments_elements_map()) {
Object* object;
- bool has_fast_smi_only_elements =
- (set_capacity_mode == kAllowSmiOnlyElements) &&
- (elements()->map()->has_fast_smi_only_elements() ||
- elements() == heap->empty_fixed_array());
- ElementsKind elements_kind = has_fast_smi_only_elements
- ? FAST_SMI_ONLY_ELEMENTS
- : FAST_ELEMENTS;
- MaybeObject* maybe = GetElementsTransitionMap(elements_kind);
+ MaybeObject* maybe = map()->GetFastElementsMap();
if (!maybe->ToObject(&object)) return maybe;
new_map = Map::cast(object);
}
- ElementsKind elements_kind = GetElementsKind();
- switch (elements_kind) {
- case FAST_SMI_ONLY_ELEMENTS:
+ switch (GetElementsKind()) {
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
- WriteBarrierMode mode(new_elements->GetWriteBarrierMode(no_gc));
+ WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
CopyFastElementsToFast(FixedArray::cast(elements()), new_elements, mode);
set_map(new_map);
set_elements(new_elements);
@@ -7947,15 +7391,13 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
}
FixedDoubleArray* elems = FixedDoubleArray::cast(obj);
- { MaybeObject* maybe_obj =
- GetElementsTransitionMap(FAST_DOUBLE_ELEMENTS);
+ { MaybeObject* maybe_obj = map()->GetFastDoubleElementsMap();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
AssertNoAllocation no_gc;
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
elems->Initialize(FixedArray::cast(elements()));
break;
@@ -7993,9 +7435,8 @@ MaybeObject* JSObject::SetSlowElements(Object* len) {
uint32_t new_length = static_cast<uint32_t>(len->Number());
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
+ case FAST_ELEMENTS: {
+ case FAST_DOUBLE_ELEMENTS:
// Make sure we never try to shrink dense arrays into sparse arrays.
ASSERT(static_cast<uint32_t>(
FixedArrayBase::cast(elements())->length()) <= new_length);
@@ -8061,7 +7502,7 @@ void JSArray::Expand(int required_size) {
Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
// Can't use this any more now because we may have had a GC!
for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
- GetIsolate()->factory()->SetContent(self, new_backing);
+ self->SetContent(*new_backing);
}
@@ -8084,15 +7525,13 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
if (value < 0) return ArrayLengthRangeError(GetHeap());
ElementsKind elements_kind = GetElementsKind();
switch (elements_kind) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
int old_capacity = FixedArrayBase::cast(elements())->length();
if (value <= old_capacity) {
if (IsJSArray()) {
Object* obj;
- if (elements_kind == FAST_ELEMENTS ||
- elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (elements_kind == FAST_ELEMENTS) {
MaybeObject* maybe_obj = EnsureWritableFastElements();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -8103,8 +7542,7 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
} else {
Address filler_start;
int filler_size;
- if (elements_kind == FAST_ELEMENTS ||
- elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (GetElementsKind() == FAST_ELEMENTS) {
FixedArray* fast_elements = FixedArray::cast(elements());
fast_elements->set_length(value);
filler_start = fast_elements->address() +
@@ -8124,14 +7562,13 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
} else {
// Otherwise, fill the unused tail with holes.
int old_length = FastD2I(JSArray::cast(this)->length()->Number());
- if (elements_kind == FAST_ELEMENTS ||
- elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ if (GetElementsKind() == FAST_ELEMENTS) {
FixedArray* fast_elements = FixedArray::cast(elements());
for (int i = value; i < old_length; i++) {
fast_elements->set_the_hole(i);
}
} else {
- ASSERT(elements_kind == FAST_DOUBLE_ELEMENTS);
+ ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
FixedDoubleArray* fast_double_elements =
FixedDoubleArray::cast(elements());
for (int i = value; i < old_length; i++) {
@@ -8147,17 +7584,10 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
int new_capacity = value > min ? value : min;
if (!ShouldConvertToSlowElements(new_capacity)) {
MaybeObject* result;
- if (elements_kind == FAST_ELEMENTS ||
- elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- SetFastElementsCapacityMode set_capacity_mode =
- elements_kind == FAST_SMI_ONLY_ELEMENTS
- ? kAllowSmiOnlyElements
- : kDontAllowSmiOnlyElements;
- result = SetFastElementsCapacityAndLength(new_capacity,
- value,
- set_capacity_mode);
+ if (GetElementsKind() == FAST_ELEMENTS) {
+ result = SetFastElementsCapacityAndLength(new_capacity, value);
} else {
- ASSERT(elements_kind == FAST_DOUBLE_ELEMENTS);
+ ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
result = SetFastDoubleElementsCapacityAndLength(new_capacity,
value);
}
@@ -8214,13 +7644,10 @@ MaybeObject* JSObject::SetElementsLength(Object* len) {
// len is not a number so make the array size one and
// set only element to len.
Object* obj;
- MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
FixedArray::cast(obj)->set(0, len);
-
- maybe_obj = EnsureCanContainElements(&len, 1);
- if (maybe_obj->IsFailure()) return maybe_obj;
-
if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1));
set_elements(FixedArray::cast(obj));
return this;
@@ -8266,7 +7693,7 @@ MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
FixedArray* new_cache;
// Grow array by factor 2 over and above what we need.
{ MaybeObject* maybe_cache =
- GetHeap()->AllocateFixedArray(transitions * 2 * step + header);
+ heap()->AllocateFixedArray(transitions * 2 * step + header);
if (!maybe_cache->To<FixedArray>(&new_cache)) return maybe_cache;
}
@@ -8319,7 +7746,7 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
// It is sufficient to validate that the receiver is not in the new prototype
// chain.
for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
- if (JSReceiver::cast(pt) == this) {
+ if (JSObject::cast(pt) == this) {
// Cycle detected.
HandleScope scope(heap->isolate());
return heap->isolate()->Throw(
@@ -8334,8 +7761,8 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
// hidden and set the new prototype on that object.
Object* current_proto = real_receiver->GetPrototype();
while (current_proto->IsJSObject() &&
- JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) {
- real_receiver = JSReceiver::cast(current_proto);
+ JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
+ real_receiver = JSObject::cast(current_proto);
current_proto = current_proto->GetPrototype();
}
}
@@ -8368,16 +7795,8 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
}
-MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
- uint32_t first_arg,
- uint32_t arg_count) {
- return EnsureCanContainElements(args->arguments() - first_arg, arg_count);
-}
-
-
bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
@@ -8438,11 +7857,6 @@ bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
Object* pt = GetPrototype();
if (pt->IsNull()) return false;
- if (pt->IsJSProxy()) {
- // We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(
- receiver, index) != ABSENT;
- }
return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
}
@@ -8519,7 +7933,6 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
}
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
@@ -8634,7 +8047,6 @@ bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
ElementsKind kind = GetElementsKind();
switch (kind) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
@@ -8701,11 +8113,6 @@ bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
Object* pt = GetPrototype();
if (pt->IsNull()) return false;
- if (pt->IsJSProxy()) {
- // We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(
- receiver, index) != ABSENT;
- }
return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
}
@@ -8782,9 +8189,9 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
// __defineGetter__ callback
if (structure->IsFixedArray()) {
Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
- if (getter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
+ if (getter->IsJSFunction()) {
+ return Object::GetPropertyWithDefinedGetter(receiver,
+ JSFunction::cast(getter));
}
// Getter is not a function.
return isolate->heap()->undefined_value();
@@ -8839,9 +8246,8 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
if (structure->IsFixedArray()) {
Handle<Object> setter(FixedArray::cast(structure)->get(kSetterIndex));
- if (setter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
+ if (setter->IsJSFunction()) {
+ return SetPropertyWithDefinedSetter(JSFunction::cast(*setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
@@ -8891,8 +8297,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
bool check_prototype) {
- ASSERT(HasFastTypeElements() ||
- HasFastArgumentsElements());
+ ASSERT(HasFastElements() || HasFastArgumentsElements());
FixedArray* backing_store = FixedArray::cast(elements());
if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
@@ -8903,10 +8308,10 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
if (!maybe->ToObject(&writable)) return maybe;
backing_store = FixedArray::cast(writable);
}
- uint32_t capacity = static_cast<uint32_t>(backing_store->length());
+ uint32_t length = static_cast<uint32_t>(backing_store->length());
if (check_prototype &&
- (index >= capacity || backing_store->get(index)->IsTheHole())) {
+ (index >= length || backing_store->get(index)->IsTheHole())) {
bool found;
MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
value,
@@ -8915,71 +8320,39 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
if (found) return result;
}
- uint32_t new_capacity = capacity;
- // Check if the length property of this object needs to be updated.
- uint32_t array_length = 0;
- bool must_update_array_length = false;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
- if (index >= array_length) {
- must_update_array_length = true;
- array_length = index + 1;
- }
- }
- // Check if the capacity of the backing store needs to be increased, or if
- // a transition to slow elements is necessary.
- if (index >= capacity) {
- bool convert_to_slow = true;
- if ((index - capacity) < kMaxGap) {
- new_capacity = NewElementsCapacity(index + 1);
- ASSERT(new_capacity > index);
- if (!ShouldConvertToSlowElements(new_capacity)) {
- convert_to_slow = false;
+ // Check whether there is extra space in fixed array.
+ if (index < length) {
+ backing_store->set(index, value);
+ if (IsJSArray()) {
+ // Update the length of the array if needed.
+ uint32_t array_length = 0;
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+ if (index >= array_length) {
+ JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
}
}
- if (convert_to_slow) {
- MaybeObject* result = NormalizeElements();
- if (result->IsFailure()) return result;
- return SetDictionaryElement(index, value, strict_mode, check_prototype);
- }
- }
- // Convert to fast double elements if appropriate.
- if (HasFastSmiOnlyElements() && !value->IsSmi() && value->IsNumber()) {
- MaybeObject* maybe =
- SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
- if (maybe->IsFailure()) return maybe;
- FixedDoubleArray::cast(elements())->set(index, value->Number());
return value;
}
- // Change elements kind from SMI_ONLY to generic FAST if necessary.
- if (HasFastSmiOnlyElements() && !value->IsSmi()) {
- MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS);
- Map* new_map;
- if (!maybe_new_map->To<Map>(&new_map)) return maybe_new_map;
- set_map(new_map);
- }
- // Increase backing store capacity if that's been decided previously.
- if (new_capacity != capacity) {
- Object* new_elements;
- SetFastElementsCapacityMode set_capacity_mode =
- value->IsSmi() && HasFastSmiOnlyElements()
- ? kAllowSmiOnlyElements
- : kDontAllowSmiOnlyElements;
- MaybeObject* maybe =
- SetFastElementsCapacityAndLength(new_capacity,
- array_length,
- set_capacity_mode);
- if (!maybe->ToObject(&new_elements)) return maybe;
- FixedArray::cast(new_elements)->set(index, value);
- return value;
- }
- // Finally, set the new element and length.
- ASSERT(elements()->IsFixedArray());
- backing_store->set(index, value);
- if (must_update_array_length) {
- JSArray::cast(this)->set_length(Smi::FromInt(array_length));
+
+ // Allow gap in fast case.
+ if ((index - length) < kMaxGap) {
+ // Try allocating extra space.
+ int new_capacity = NewElementsCapacity(index + 1);
+ if (!ShouldConvertToSlowElements(new_capacity)) {
+ ASSERT(static_cast<uint32_t>(new_capacity) > index);
+ Object* new_elements;
+ MaybeObject* maybe =
+ SetFastElementsCapacityAndLength(new_capacity, index + 1);
+ if (!maybe->ToObject(&new_elements)) return maybe;
+ FixedArray::cast(new_elements)->set(index, value);
+ return value;
+ }
}
- return value;
+
+ // Otherwise default to slow case.
+ MaybeObject* result = NormalizeElements();
+ if (result->IsFailure()) return result;
+ return SetDictionaryElement(index, value, strict_mode, check_prototype);
}
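// Illustrative sketch (added commentary, not from the V8 sources): the
// reverted SetFastElement above chooses between three paths: write in place
// when the index fits the current backing store, grow the store when the gap
// to the new index is below kMaxGap, and otherwise fall back to dictionary
// (slow) elements. A minimal standalone analogue of that decision, with
// hypothetical names (FastStore, ShouldGoSlow) standing in for the internals:
#include <cstdint>
#include <vector>

struct FastStore {
  static const uint32_t kMaxGap = 1024;   // stands in for the kMaxGap above
  std::vector<int> backing;               // stands in for the FixedArray

  // Rough stand-in for ShouldConvertToSlowElements(): give up on fast
  // storage when the grown store would be mostly holes.
  static bool ShouldGoSlow(uint32_t new_capacity, uint32_t used) {
    return new_capacity > 3 * (used + 1);
  }

  // Returns true if the value stayed in fast storage, false if the caller
  // should normalize to dictionary-mode elements.
  bool Set(uint32_t index, int value, uint32_t used_elements) {
    uint32_t length = static_cast<uint32_t>(backing.size());
    if (index < length) {                 // in-place write
      backing[index] = value;
      return true;
    }
    if (index - length < kMaxGap) {       // small gap: try to grow
      uint32_t new_capacity = index + 1;
      if (!ShouldGoSlow(new_capacity, used_elements)) {
        backing.resize(new_capacity);
        backing[index] = value;
        return true;
      }
    }
    return false;                         // caller switches to slow elements
  }
};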
@@ -9075,9 +8448,7 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
MaybeObject* result = CanConvertToFastDoubleElements()
? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
- : SetFastElementsCapacityAndLength(new_length,
- new_length,
- kDontAllowSmiOnlyElements);
+ : SetFastElementsCapacityAndLength(new_length, new_length);
if (result->IsFailure()) return result;
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -9121,15 +8492,10 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
}
- MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
- elms_length,
- length,
- kDontAllowSmiOnlyElements);
+ MaybeObject* maybe_obj =
+ SetFastElementsCapacityAndLength(elms_length, length);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- return SetFastElement(index,
- value,
- strict_mode,
- check_prototype);
+ return SetFastElement(index, value, strict_mode, check_prototype);
}
double double_value = value_is_smi
@@ -9180,17 +8546,6 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
}
-MaybeObject* JSReceiver::SetElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_proto) {
- return IsJSProxy()
- ? JSProxy::cast(this)->SetElementWithHandler(index, value, strict_mode)
- : JSObject::cast(this)->SetElement(index, value, strict_mode, check_proto)
- ;
-}
-
-
MaybeObject* JSObject::SetElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
@@ -9237,7 +8592,6 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
bool check_prototype) {
Isolate* isolate = GetIsolate();
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
return SetFastElement(index, value, strict_mode, check_prototype);
case FAST_DOUBLE_ELEMENTS:
@@ -9400,7 +8754,6 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
break;
}
// Fall through.
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
backing_store = FixedArray::cast(backing_store_base);
*capacity = backing_store->length();
@@ -9676,7 +9029,6 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
if (this->IsStringObjectWithCharacterAt(index)) return true;
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(
@@ -9916,7 +9268,6 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
PropertyAttributes filter) {
int counter = 0;
switch (GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
@@ -10598,7 +9949,7 @@ template class HashTable<CompilationCacheShape, HashTableKey*>;
template class HashTable<MapCacheShape, HashTableKey*>;
-template class HashTable<ObjectHashTableShape, JSReceiver*>;
+template class HashTable<ObjectHashTableShape, JSObject*>;
template class Dictionary<StringDictionaryShape, String*>;
@@ -10782,6 +10133,8 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
// If the object is in dictionary mode, it is converted to fast elements
// mode.
MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
+ ASSERT(!HasExternalArrayElements());
+
Heap* heap = GetHeap();
if (HasDictionaryElements()) {
@@ -10795,7 +10148,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
// Convert to fast elements.
Object* obj;
- { MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
+ { MaybeObject* maybe_obj = map()->GetFastElementsMap();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
@@ -10811,16 +10164,13 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
set_map(new_map);
set_elements(fast_elements);
- } else if (HasExternalArrayElements()) {
- // External arrays cannot have holes or undefined elements.
- return Smi::FromInt(ExternalArray::cast(elements())->length());
} else if (!HasFastDoubleElements()) {
Object* obj;
{ MaybeObject* maybe_obj = EnsureWritableFastElements();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
}
- ASSERT(HasFastTypeElements() || HasFastDoubleElements());
+ ASSERT(HasFastElements() || HasFastDoubleElements());
// Collect holes at the end, undefined before that and the rest at the
// start, and return the number of non-hole, non-undefined values.
@@ -11944,9 +11294,9 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
}
-Object* ObjectHashTable::Lookup(JSReceiver* key) {
+Object* ObjectHashTable::Lookup(JSObject* key) {
// If the object does not have an identity hash, it was never used as a key.
- MaybeObject* maybe_hash = key->GetIdentityHash(OMIT_CREATION);
+ MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
if (maybe_hash->IsFailure()) return GetHeap()->undefined_value();
int entry = FindEntry(key);
if (entry == kNotFound) return GetHeap()->undefined_value();
@@ -11954,10 +11304,10 @@ Object* ObjectHashTable::Lookup(JSReceiver* key) {
}
-MaybeObject* ObjectHashTable::Put(JSReceiver* key, Object* value) {
+MaybeObject* ObjectHashTable::Put(JSObject* key, Object* value) {
// Make sure the key object has an identity hash code.
int hash;
- { MaybeObject* maybe_hash = key->GetIdentityHash(ALLOW_CREATION);
+ { MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::ALLOW_CREATION);
if (maybe_hash->IsFailure()) return maybe_hash;
hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
}
@@ -11987,7 +11337,7 @@ MaybeObject* ObjectHashTable::Put(JSReceiver* key, Object* value) {
}
-void ObjectHashTable::AddEntry(int entry, JSReceiver* key, Object* value) {
+void ObjectHashTable::AddEntry(int entry, JSObject* key, Object* value) {
set(EntryToIndex(entry), key);
set(EntryToIndex(entry) + 1, value);
ElementAdded();
@@ -12251,7 +11601,7 @@ int BreakPointInfo::GetBreakPointCount() {
// Multiple break points.
return FixedArray::cast(break_point_objects())->length();
}
-#endif // ENABLE_DEBUGGER_SUPPORT
+#endif
} } // namespace v8::internal
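// Illustrative sketch (added commentary, not from the V8 sources): the
// ObjectHashTable hunks above key the table on a per-object identity hash.
// Lookup() requests the hash with OMIT_CREATION and returns undefined
// immediately if none exists, since an object without an identity hash was
// never used as a key, while Put() forces creation with ALLOW_CREATION.
// A rough standalone analogue, with hypothetical names (IdentityHashed,
// ObjectTable):
#include <optional>
#include <string>
#include <unordered_map>

struct IdentityHashed {
  std::optional<int> identity_hash;  // created lazily, like GetIdentityHash
};

class ObjectTable {
 public:
  // Mirrors Lookup(): no identity hash means the key cannot be present.
  std::string Lookup(const IdentityHashed& key) const {
    if (!key.identity_hash) return "undefined";
    auto it = table_.find(*key.identity_hash);
    return it == table_.end() ? "undefined" : it->second;
  }

  // Mirrors Put(): create the identity hash on demand, then insert.
  void Put(IdentityHashed& key, const std::string& value) {
    if (!key.identity_hash) key.identity_hash = next_hash_++;
    table_[*key.identity_hash] = value;
  }

 private:
  std::unordered_map<int, std::string> table_;
  int next_hash_ = 0;
};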
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index b95fa574a0..d9c7a82276 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -38,7 +38,6 @@
#elif V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
#endif
-#include "v8checks.h"
//
// Most object types in the V8 JavaScript are described in this file.
@@ -137,13 +136,8 @@ namespace v8 {
namespace internal {
enum ElementsKind {
- // The "fast" kind for elements that only contain SMI values. Must be first
- // to make it possible to efficiently check maps for this kind.
- FAST_SMI_ONLY_ELEMENTS,
-
- // The "fast" kind for tagged values. Must be second to make it possible to
- // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
- // together at once.
+ // The "fast" kind for tagged values. Must be first to make it possible
+ // to efficiently check maps if they have fast elements.
FAST_ELEMENTS,
// The "fast" kind for unwrapped, non-tagged double values.
@@ -166,7 +160,7 @@ enum ElementsKind {
// Derived constants from ElementsKind
FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
- FIRST_ELEMENTS_KIND = FAST_SMI_ONLY_ELEMENTS,
+ FIRST_ELEMENTS_KIND = FAST_ELEMENTS,
LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
};
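// Illustrative sketch (added commentary, not from the V8 sources): the enum
// above relies on ordering so that elements-kind checks compile down to plain
// integer comparisons, e.g. FAST_ELEMENTS must come first and the
// external-array kinds must be contiguous. A standalone example of the same
// idiom, using an invented enum rather than the real ElementsKind values:
enum SketchElementsKind {
  SKETCH_FAST_ELEMENTS,             // first, so "fast" is a single compare
  SKETCH_FAST_DOUBLE_ELEMENTS,
  SKETCH_DICTIONARY_ELEMENTS,
  SKETCH_EXTERNAL_BYTE_ELEMENTS,    // external kinds are contiguous...
  SKETCH_EXTERNAL_INT_ELEMENTS,
  SKETCH_EXTERNAL_PIXEL_ELEMENTS,   // ...so one range check covers them all
  SKETCH_FIRST_EXTERNAL_KIND = SKETCH_EXTERNAL_BYTE_ELEMENTS,
  SKETCH_LAST_EXTERNAL_KIND = SKETCH_EXTERNAL_PIXEL_ELEMENTS
};

inline bool SketchHasFastElements(SketchElementsKind kind) {
  return kind == SKETCH_FAST_ELEMENTS;
}

inline bool SketchHasExternalArrayElements(SketchElementsKind kind) {
  return kind >= SKETCH_FIRST_EXTERNAL_KIND &&
         kind <= SKETCH_LAST_EXTERNAL_KIND;
}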
@@ -180,6 +174,7 @@ class PropertyDetails BASE_EMBEDDED {
PropertyDetails(PropertyAttributes attributes,
PropertyType type,
int index = 0) {
+ ASSERT(type != ELEMENTS_TRANSITION);
ASSERT(TypeField::is_valid(type));
ASSERT(AttributesField::is_valid(attributes));
ASSERT(StorageField::is_valid(index));
@@ -193,6 +188,23 @@ class PropertyDetails BASE_EMBEDDED {
ASSERT(index == this->index());
}
+ PropertyDetails(PropertyAttributes attributes,
+ PropertyType type,
+ ElementsKind elements_kind) {
+ ASSERT(type == ELEMENTS_TRANSITION);
+ ASSERT(TypeField::is_valid(type));
+ ASSERT(AttributesField::is_valid(attributes));
+ ASSERT(StorageField::is_valid(static_cast<int>(elements_kind)));
+
+ value_ = TypeField::encode(type)
+ | AttributesField::encode(attributes)
+ | StorageField::encode(static_cast<int>(elements_kind));
+
+ ASSERT(type == this->type());
+ ASSERT(attributes == this->attributes());
+ ASSERT(elements_kind == this->elements_kind());
+ }
+
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
inline Smi* AsSmi();
@@ -214,6 +226,11 @@ class PropertyDetails BASE_EMBEDDED {
int index() { return StorageField::decode(value_); }
+ ElementsKind elements_kind() {
+ ASSERT(type() == ELEMENTS_TRANSITION);
+ return static_cast<ElementsKind>(StorageField::decode(value_));
+ }
+
inline PropertyDetails AsDeleted();
static bool IsValidIndex(int index) {
@@ -259,13 +276,6 @@ enum NormalizedMapSharingMode {
};
-// Indicates whether a get method should implicitly create the object looked up.
-enum CreationFlag {
- ALLOW_CREATION,
- OMIT_CREATION
-};
-
-
// Instance size sentinel for objects of variable size.
static const int kVariableSizeSentinel = 0;
@@ -319,7 +329,6 @@ static const int kVariableSizeSentinel = 0;
V(HEAP_NUMBER_TYPE) \
V(FOREIGN_TYPE) \
V(BYTE_ARRAY_TYPE) \
- V(FREE_SPACE_TYPE) \
/* Note: the order of these external array */ \
/* types is relied upon in */ \
/* Object::IsExternalArray(). */ \
@@ -576,7 +585,6 @@ enum InstanceType {
HEAP_NUMBER_TYPE,
FOREIGN_TYPE,
BYTE_ARRAY_TYPE,
- FREE_SPACE_TYPE,
EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
EXTERNAL_SHORT_ARRAY_TYPE,
@@ -613,30 +621,24 @@ enum InstanceType {
JS_MESSAGE_OBJECT_TYPE,
- // All the following types are subtypes of JSReceiver, which corresponds to
- // objects in the JS sense. The first and the last type in this range are
- // the two forms of function. This organization enables using the same
- // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the
- // NONCALLABLE_JS_OBJECT range.
- JS_FUNCTION_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE
- JS_PROXY_TYPE, // LAST_JS_PROXY_TYPE
-
- JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
+ JS_VALUE_TYPE, // FIRST_NON_CALLABLE_OBJECT_TYPE, FIRST_JS_RECEIVER_TYPE
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GLOBAL_OBJECT_TYPE,
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
+ JS_PROXY_TYPE,
JS_WEAK_MAP_TYPE,
- JS_REGEXP_TYPE,
+ JS_REGEXP_TYPE, // LAST_NONCALLABLE_SPEC_OBJECT_TYPE
- JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
+ JS_FUNCTION_TYPE, // FIRST_CALLABLE_SPEC_OBJECT_TYPE
+ JS_FUNCTION_PROXY_TYPE, // LAST_CALLABLE_SPEC_OBJECT_TYPE
// Pseudo-types
FIRST_TYPE = 0x0,
- LAST_TYPE = JS_FUNCTION_TYPE,
+ LAST_TYPE = JS_FUNCTION_PROXY_TYPE,
INVALID_TYPE = FIRST_TYPE - 1,
FIRST_NONSTRING_TYPE = MAP_TYPE,
// Boundaries for testing for an external array.
@@ -649,23 +651,17 @@ enum InstanceType {
// are not continuous in this enum! The enum ranges instead reflect the
// external class names, where proxies are treated as either ordinary objects,
// or functions.
- FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE,
+ FIRST_JS_RECEIVER_TYPE = JS_VALUE_TYPE,
LAST_JS_RECEIVER_TYPE = LAST_TYPE,
- // Boundaries for testing the types represented as JSObject
- FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
- LAST_JS_OBJECT_TYPE = LAST_TYPE,
- // Boundaries for testing the types represented as JSProxy
- FIRST_JS_PROXY_TYPE = JS_FUNCTION_PROXY_TYPE,
- LAST_JS_PROXY_TYPE = JS_PROXY_TYPE,
- // Boundaries for testing whether the type is a JavaScript object.
- FIRST_SPEC_OBJECT_TYPE = FIRST_JS_RECEIVER_TYPE,
- LAST_SPEC_OBJECT_TYPE = LAST_JS_RECEIVER_TYPE,
// Boundaries for testing the types for which typeof is "object".
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_PROXY_TYPE,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_VALUE_TYPE,
LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
- // Note that the types for which typeof is "function" are not continuous.
- // Define this so that we can put assertions on discrete checks.
- NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2
+ // Boundaries for testing the types for which typeof is "function".
+ FIRST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_TYPE,
+ LAST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_PROXY_TYPE,
+ // Boundaries for testing whether the type is a JavaScript object.
+ FIRST_SPEC_OBJECT_TYPE = FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE
};
static const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE -
@@ -701,7 +697,6 @@ class ElementsAccessor;
class FixedArrayBase;
class ObjectVisitor;
class StringStream;
-class Failure;
struct ValueInfo : public Malloced {
ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -715,6 +710,7 @@ struct ValueInfo : public Malloced {
// A template-ized version of the IsXXX functions.
template <class C> static inline bool Is(Object* obj);
+class Failure;
class MaybeObject BASE_EMBEDDED {
public:
@@ -752,7 +748,7 @@ class MaybeObject BASE_EMBEDDED {
// Prints this object with details.
inline void Print() {
Print(stdout);
- }
+ };
inline void PrintLn() {
PrintLn(stdout);
}
@@ -795,7 +791,6 @@ class MaybeObject BASE_EMBEDDED {
V(ExternalDoubleArray) \
V(ExternalPixelArray) \
V(ByteArray) \
- V(FreeSpace) \
V(JSReceiver) \
V(JSObject) \
V(JSContextExtensionObject) \
@@ -840,9 +835,6 @@ class MaybeObject BASE_EMBEDDED {
V(AccessCheckNeeded) \
V(JSGlobalPropertyCell) \
-
-class JSReceiver;
-
// Object is the abstract superclass for all classes in the
// object hierarchy.
// Object does not use any virtual functions to avoid the
@@ -867,7 +859,6 @@ class Object : public MaybeObject {
#undef DECLARE_STRUCT_PREDICATE
INLINE(bool IsSpecObject());
- INLINE(bool IsSpecFunction());
// Oddball testing.
INLINE(bool IsUndefined());
@@ -876,10 +867,6 @@ class Object : public MaybeObject {
INLINE(bool IsTrue());
INLINE(bool IsFalse());
inline bool IsArgumentsMarker();
- inline bool NonFailureIsHeapObject();
-
- // Filler objects (fillers and free space objects).
- inline bool IsFiller();
// Extract the number.
inline double Number();
@@ -916,8 +903,15 @@ class Object : public MaybeObject {
LookupResult* result,
String* key,
PropertyAttributes* attributes);
+ MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
+ Object* structure,
+ String* name,
+ Object* holder);
+ MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(Object* receiver,
+ String* name,
+ Object* handler);
MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
- JSReceiver* getter);
+ JSFunction* getter);
inline MaybeObject* GetElement(uint32_t index);
// For use when we know that no exception can be thrown.
@@ -1101,13 +1095,101 @@ class MapWord BASE_EMBEDDED {
// View this map word as a forwarding address.
inline HeapObject* ToForwardingAddress();
- static inline MapWord FromRawValue(uintptr_t value) {
- return MapWord(value);
- }
+ // Marking phase of full collection: the map word of live objects is
+ // marked, and may be marked as overflowed (eg, the object is live, its
+ // children have not been visited, and it does not fit in the marking
+ // stack).
- inline uintptr_t ToRawValue() {
- return value_;
- }
+ // True if this map word's mark bit is set.
+ inline bool IsMarked();
+
+ // Return this map word but with its mark bit set.
+ inline void SetMark();
+
+ // Return this map word but with its mark bit cleared.
+ inline void ClearMark();
+
+ // True if this map word's overflow bit is set.
+ inline bool IsOverflowed();
+
+ // Return this map word but with its overflow bit set.
+ inline void SetOverflow();
+
+ // Return this map word but with its overflow bit cleared.
+ inline void ClearOverflow();
+
+
+ // Compacting phase of a full compacting collection: the map word of live
+ // objects contains an encoding of the original map address along with the
+ // forwarding address (represented as an offset from the first live object
+ // in the same page as the (old) object address).
+
+ // Create a map word from a map address and a forwarding address offset.
+ static inline MapWord EncodeAddress(Address map_address, int offset);
+
+ // Return the map address encoded in this map word.
+ inline Address DecodeMapAddress(MapSpace* map_space);
+
+ // Return the forwarding offset encoded in this map word.
+ inline int DecodeOffset();
+
+
+ // During serialization: the map word is used to hold an encoded
+ // address, and possibly a mark bit (set and cleared with SetMark
+ // and ClearMark).
+
+ // Create a map word from an encoded address.
+ static inline MapWord FromEncodedAddress(Address address);
+
+ inline Address ToEncodedAddress();
+
+ // Bits used by the marking phase of the garbage collector.
+ //
+ // The first word of a heap object is normally a map pointer. The last two
+ // bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to
+ // mark an object as live and/or overflowed:
+ // last bit = 0, marked as alive
+ // second bit = 1, overflowed
+ // An object is only marked as overflowed when it is marked as live while
+ // the marking stack is overflowed.
+ static const int kMarkingBit = 0; // marking bit
+ static const int kMarkingMask = (1 << kMarkingBit); // marking mask
+ static const int kOverflowBit = 1; // overflow bit
+ static const int kOverflowMask = (1 << kOverflowBit); // overflow mask
+
+ // Forwarding pointers and map pointer encoding. On 32 bit all the bits are
+ // used.
+ // +-----------------+------------------+-----------------+
+ // |forwarding offset|page offset of map|page index of map|
+ // +-----------------+------------------+-----------------+
+ // ^ ^ ^
+ // | | |
+ // | | kMapPageIndexBits
+ // | kMapPageOffsetBits
+ // kForwardingOffsetBits
+ static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
+ static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
+#ifdef V8_HOST_ARCH_64_BIT
+ static const int kMapPageIndexBits = 16;
+#else
+ // Use all the 32-bits to encode on a 32-bit platform.
+ static const int kMapPageIndexBits =
+ 32 - (kMapPageOffsetBits + kForwardingOffsetBits);
+#endif
+
+ static const int kMapPageIndexShift = 0;
+ static const int kMapPageOffsetShift =
+ kMapPageIndexShift + kMapPageIndexBits;
+ static const int kForwardingOffsetShift =
+ kMapPageOffsetShift + kMapPageOffsetBits;
+
+ // Bit masks covering the different parts the encoding.
+ static const uintptr_t kMapPageIndexMask =
+ (1 << kMapPageOffsetShift) - 1;
+ static const uintptr_t kMapPageOffsetMask =
+ ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
+ static const uintptr_t kForwardingOffsetMask =
+ ~(kMapPageIndexMask | kMapPageOffsetMask);
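// Illustrative sketch (added commentary, not from the V8 sources): the
// constants above pack three fields into one word, with the map page index in
// the low bits, the map page offset above it, and the forwarding offset in
// the remaining high bits; each mask is derived from the shift of the next
// field. A standalone round trip through that layout, using invented field
// widths rather than V8's:
#include <cassert>
#include <cstdint>

namespace mapword_sketch {
constexpr int kIndexBits = 10, kOffsetBits = 10, kForwardBits = 12;
constexpr int kIndexShift = 0;
constexpr int kOffsetShift = kIndexShift + kIndexBits;
constexpr int kForwardShift = kOffsetShift + kOffsetBits;

constexpr uint32_t kIndexMask = (1u << kOffsetShift) - 1;
constexpr uint32_t kOffsetMask = ((1u << kForwardShift) - 1) & ~kIndexMask;
constexpr uint32_t kForwardMask = ~(kIndexMask | kOffsetMask);

constexpr uint32_t Encode(uint32_t index, uint32_t offset, uint32_t forward) {
  return (index << kIndexShift) | (offset << kOffsetShift) |
         (forward << kForwardShift);
}

inline void RoundTrip() {
  uint32_t word = Encode(3, 17, 200);
  assert(((word & kIndexMask) >> kIndexShift) == 3);
  assert(((word & kOffsetMask) >> kOffsetShift) == 17);
  assert(((word & kForwardMask) >> kForwardShift) == 200);
}
}  // namespace mapword_sketch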
private:
// HeapObject calls the private constructor and directly reads the value.
@@ -1127,7 +1209,6 @@ class HeapObject: public Object {
// information.
inline Map* map();
inline void set_map(Map* value);
- inline void set_map_unsafe(Map* value);
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
@@ -1135,8 +1216,8 @@ class HeapObject: public Object {
inline void set_map_word(MapWord map_word);
// The Heap the object was allocated in. Used also to access Isolate.
+  // This method cannot be used during GC; it ASSERTs this.
inline Heap* GetHeap();
-
// Convenience method to get current isolate. This method can be
// accessed only when its result is the same as
// Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
@@ -1165,6 +1246,31 @@ class HeapObject: public Object {
// GC internal.
inline int SizeFromMap(Map* map);
+ // Support for the marking heap objects during the marking phase of GC.
+ // True if the object is marked live.
+ inline bool IsMarked();
+
+ // Mutate this object's map pointer to indicate that the object is live.
+ inline void SetMark();
+
+ // Mutate this object's map pointer to remove the indication that the
+ // object is live (ie, partially restore the map pointer).
+ inline void ClearMark();
+
+ // True if this object is marked as overflowed. Overflowed objects have
+ // been reached and marked during marking of the heap, but their children
+ // have not necessarily been marked and they have not been pushed on the
+ // marking stack.
+ inline bool IsOverflowed();
+
+ // Mutate this object's map pointer to indicate that the object is
+ // overflowed.
+ inline void SetOverflow();
+
+ // Mutate this object's map pointer to remove the indication that the
+ // object is overflowed (ie, partially restore the map pointer).
+ inline void ClearOverflow();
+
// Returns the field at offset in obj, as a read/write Object* reference.
// Does no checking, and is safe to use during GC, while maps are invalid.
// Does not invoke write barrier, so should only be assigned to
@@ -1188,14 +1294,18 @@ class HeapObject: public Object {
HeapObjectPrint(stdout);
}
void HeapObjectPrint(FILE* out);
- void PrintHeader(FILE* out, const char* id);
#endif
-
#ifdef DEBUG
void HeapObjectVerify();
inline void VerifyObjectField(int offset);
inline void VerifySmiField(int offset);
+#endif
+
+#ifdef OBJECT_PRINT
+ void PrintHeader(FILE* out, const char* id);
+#endif
+#ifdef DEBUG
// Verify a pointer is a valid HeapObject pointer that points to object
// areas in the heap.
static void VerifyHeapPointer(Object* p);
@@ -1338,18 +1448,8 @@ class JSReceiver: public HeapObject {
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value);
MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
-
- // Set the index'th array element.
- // Can cause GC, or return failure if GC is required.
- MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype);
// Returns the class name ([[Class]] property in the specification).
String* class_name();
@@ -1366,7 +1466,6 @@ class JSReceiver: public HeapObject {
// Can cause a GC.
inline bool HasProperty(String* name);
inline bool HasLocalProperty(String* name);
- inline bool HasElement(uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
@@ -1375,18 +1474,11 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
bool skip_hidden_prototypes);
- // Retrieves a permanent object identity hash code. The undefined value might
- // be returned in case no has been created yet and OMIT_CREATION was used.
- inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
-
// Lookup a property. If found, the result is valid and has
// detailed information.
void LocalLookup(String* name, LookupResult* result);
void Lookup(String* name, LookupResult* result);
- protected:
- Smi* GenerateIdentityHash();
-
private:
PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
LookupResult* result,
@@ -1433,14 +1525,8 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT inline MaybeObject* ResetElements();
inline ElementsKind GetElementsKind();
inline ElementsAccessor* GetElementsAccessor();
- inline bool HasFastSmiOnlyElements();
inline bool HasFastElements();
- // Returns if an object has either FAST_ELEMENT or FAST_SMI_ONLY_ELEMENT
- // elements. TODO(danno): Rename HasFastTypeElements to HasFastElements() and
- // HasFastElements to HasFastObjectElements.
- inline bool HasFastTypeElements();
inline bool HasFastDoubleElements();
- inline bool HasNonStrictArgumentsElements();
inline bool HasDictionaryElements();
inline bool HasExternalPixelElements();
inline bool HasExternalArrayElements();
@@ -1468,11 +1554,6 @@ class JSObject: public JSReceiver {
// a dictionary, and it will stay a dictionary.
MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
- MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
- Object* structure,
- String* name);
-
- // Can cause GC.
MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
String* key,
Object* value,
@@ -1490,6 +1571,8 @@ class JSObject: public JSReceiver {
Object* value,
JSObject* holder,
StrictModeFlag strict_mode);
+ MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSFunction* setter,
+ Object* value);
MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
String* name,
Object* value,
@@ -1577,28 +1660,37 @@ class JSObject: public JSReceiver {
// Accessors for hidden properties object.
//
// Hidden properties are not local properties of the object itself.
- // Instead they are stored in an auxiliary structure kept as a local
+ // Instead they are stored on an auxiliary JSObject stored as a local
// property with a special name Heap::hidden_symbol(). But if the
// receiver is a JSGlobalProxy then the auxiliary object is a property
- // of its prototype, and if it's a detached proxy, then you can't have
- // hidden properties.
-
- // Sets a hidden property on this object. Returns this object if successful,
- // undefined if called on a detached proxy, and a failure if a GC
- // is required
- MaybeObject* SetHiddenProperty(String* key, Object* value);
- // Gets the value of a hidden property with the given key. Returns undefined
- // if the property doesn't exist (or if called on a detached proxy),
- // otherwise returns the value set for the key.
- Object* GetHiddenProperty(String* key);
- // Deletes a hidden property. Deleting a non-existing property is
- // considered successful.
- void DeleteHiddenProperty(String* key);
- // Returns true if the object has a property with the hidden symbol as name.
- bool HasHiddenProperties();
-
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
- MUST_USE_RESULT MaybeObject* SetIdentityHash(Object* hash, CreationFlag flag);
+ // of its prototype.
+ //
+ // Has/Get/SetHiddenPropertiesObject methods don't allow the holder to be
+ // a JSGlobalProxy. Use BypassGlobalProxy method above to get to the real
+ // holder.
+ //
+ // These accessors do not touch interceptors or accessors.
+ inline bool HasHiddenPropertiesObject();
+ inline Object* GetHiddenPropertiesObject();
+ MUST_USE_RESULT inline MaybeObject* SetHiddenPropertiesObject(
+ Object* hidden_obj);
+
+ // Indicates whether the hidden properties object should be created.
+ enum HiddenPropertiesFlag { ALLOW_CREATION, OMIT_CREATION };
+
+ // Retrieves the hidden properties object.
+ //
+ // The undefined value might be returned in case no hidden properties object
+ // is present and creation was omitted.
+ inline bool HasHiddenProperties();
+ MUST_USE_RESULT MaybeObject* GetHiddenProperties(HiddenPropertiesFlag flag);
+
+ // Retrieves a permanent object identity hash code.
+ //
+ // The identity hash is stored as a hidden property. The undefined value might
+ // be returned in case no hidden properties object is present and creation was
+ // omitted.
+ MUST_USE_RESULT MaybeObject* GetIdentityHash(HiddenPropertiesFlag flag);
MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
@@ -1606,19 +1698,6 @@ class JSObject: public JSReceiver {
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
- inline void ValidateSmiOnlyElements();
-
- // Makes sure that this object can contain non-smi Object as elements.
- inline MaybeObject* EnsureCanContainNonSmiElements();
-
- // Makes sure that this object can contain the specified elements.
- inline MaybeObject* EnsureCanContainElements(Object** elements,
- uint32_t count);
- inline MaybeObject* EnsureCanContainElements(FixedArray* elements);
- MaybeObject* EnsureCanContainElements(Arguments* arguments,
- uint32_t first_arg,
- uint32_t arg_count);
-
// Do we want to keep the elements in fast case when increasing the
// capacity?
bool ShouldConvertToSlowElements(int new_capacity);
@@ -1632,6 +1711,7 @@ class JSObject: public JSReceiver {
bool CanConvertToFastDoubleElements();
// Tells whether the index'th element is present.
+ inline bool HasElement(uint32_t index);
bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
// Computes the new capacity when expanding the elements of a JSObject.
@@ -1667,7 +1747,6 @@ class JSObject: public JSReceiver {
Object* value,
StrictModeFlag strict_mode,
bool check_prototype);
-
MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
@@ -1690,18 +1769,11 @@ class JSObject: public JSReceiver {
// The undefined object if index is out of bounds.
MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
- enum SetFastElementsCapacityMode {
- kAllowSmiOnlyElements,
- kDontAllowSmiOnlyElements
- };
-
// Replace the elements' backing store with fast elements of the given
// capacity. Update the length for JSArrays. Returns the new backing
// store.
- MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
- int capacity,
- int length,
- SetFastElementsCapacityMode set_capacity_mode);
+ MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
+ int length);
MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
int capacity,
int length);
@@ -1729,6 +1801,10 @@ class JSObject: public JSReceiver {
inline Object* GetInternalField(int index);
inline void SetInternalField(int index, Object* value);
+ // Lookup a property. If found, the result is valid and has
+ // detailed information.
+ void LocalLookup(String* name, LookupResult* result);
+
// The following lookup functions skip interceptors.
void LocalLookupRealNamedProperty(String* name, LookupResult* result);
void LookupRealNamedProperty(String* name, LookupResult* result);
@@ -1784,11 +1860,6 @@ class JSObject: public JSReceiver {
Object* value,
PropertyAttributes attributes);
- // Returns a new map with all transitions dropped from the object's current
- // map and the ElementsKind set.
- MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
- ElementsKind elements_kind);
-
// Converts a descriptor of any other type to a real field,
// backed by the properties array. Descriptors of visible
// types, such as CONSTANT_FUNCTION, keep their enumeration order.
@@ -1854,14 +1925,11 @@ class JSObject: public JSReceiver {
WriteBarrierMode mode
= UPDATE_WRITE_BARRIER);
- // Initializes the body after properties slot, properties slot is
- // initialized by set_properties. Fill the pre-allocated fields with
- // pre_allocated_value and the rest with filler_value.
- // Note: this call does not update write barrier, the caller is responsible
- // to ensure that |filler_value| can be collected without WB here.
- inline void InitializeBody(Map* map,
- Object* pre_allocated_value,
- Object* filler_value);
+  // Initializes the body after the properties slot; the properties slot is
+  // initialized by set_properties.
+  // Note: this call does not update the write barrier; it is the caller's
+  // responsibility to ensure that *v* can be collected without WB here.
+ inline void InitializeBody(int object_size, Object* value);
// Check whether this object references another object
bool ReferencesObject(Object* obj);
@@ -1986,18 +2054,6 @@ class JSObject: public JSReceiver {
StrictModeFlag strict_mode,
bool check_prototype);
- // Searches the prototype chain for a callback setter and sets the property
- // with the setter if it finds one. The '*found' flag indicates whether
- // a setter was found or not.
- // This function can cause GC and can return a failure result with
- // '*found==true'.
- MUST_USE_RESULT MaybeObject* SetPropertyWithCallbackSetterInPrototypes(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- bool* found,
- StrictModeFlag strict_mode);
-
MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
DeleteMode mode);
MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(String* name);
@@ -2036,15 +2092,6 @@ class JSObject: public JSReceiver {
void LookupInDescriptor(String* name, LookupResult* result);
- // Returns the hidden properties backing store object, currently
- // a StringDictionary, stored on this object.
- // If no hidden properties object has been put on this object,
- // return undefined, unless create_if_absent is true, in which case
- // a new dictionary is created, added to this object, and returned.
- MaybeObject* GetHiddenPropertiesDictionary(bool create_if_absent);
- // Updates the existing hidden properties dictionary.
- MaybeObject* SetHiddenPropertiesDictionary(StringDictionary* dictionary);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2870,7 +2917,7 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
JSObject* obj,
int unused_property_fields);
- // Find entry for key, otherwise return kNotFound. Optimized version of
+  // Find entry for key, otherwise return kNotFound. Optimized version of
// HashTable::FindEntry.
int FindEntry(String* key);
};
@@ -2933,10 +2980,10 @@ class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> {
class ObjectHashTableShape {
public:
- static inline bool IsMatch(JSReceiver* key, Object* other);
- static inline uint32_t Hash(JSReceiver* key);
- static inline uint32_t HashForObject(JSReceiver* key, Object* object);
- MUST_USE_RESULT static inline MaybeObject* AsObject(JSReceiver* key);
+ static inline bool IsMatch(JSObject* key, Object* other);
+ static inline uint32_t Hash(JSObject* key);
+ static inline uint32_t HashForObject(JSObject* key, Object* object);
+ MUST_USE_RESULT static inline MaybeObject* AsObject(JSObject* key);
static const int kPrefixSize = 0;
static const int kEntrySize = 2;
};
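// Illustrative sketch (added commentary, not from the V8 sources):
// ObjectHashTableShape above is a "shape" policy consumed by the HashTable
// template; it supplies key matching, hashing, and the per-entry layout
// constants, while the table itself stays generic. A compact standalone
// version of the same pattern, with hypothetical names (StringShape,
// PolicyTable); the table assumes capacity exceeds the number of insertions:
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

struct StringShape {
  static bool IsMatch(const std::string& key, const std::string& other) {
    return key == other;
  }
  static uint32_t Hash(const std::string& key) {
    uint32_t h = 2166136261u;  // FNV-1a, purely for illustration
    for (char c : key) {
      h ^= static_cast<uint8_t>(c);
      h *= 16777619u;
    }
    return h;
  }
  static const int kEntrySize = 2;  // key + value, as in the shape above
};

template <typename Shape, typename Key>
class PolicyTable {
 public:
  explicit PolicyTable(std::size_t capacity) : entries_(capacity) {}

  // Linear probing, with hashing and matching delegated to the Shape policy.
  void Put(const Key& key, int value) {
    std::size_t i = Shape::Hash(key) % entries_.size();
    while (entries_[i].used && !Shape::IsMatch(entries_[i].key, key)) {
      i = (i + 1) % entries_.size();
    }
    entries_[i] = Entry{true, key, value};
  }

 private:
  struct Entry {
    bool used = false;
    Key key{};
    int value = 0;
  };
  std::vector<Entry> entries_;
};

// Usage: PolicyTable<StringShape, std::string> table(16); table.Put("x", 1);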
@@ -2944,7 +2991,7 @@ class ObjectHashTableShape {
// ObjectHashTable maps keys that are JavaScript objects to object values by
// using the identity hash of the key for hashing purposes.
-class ObjectHashTable: public HashTable<ObjectHashTableShape, JSReceiver*> {
+class ObjectHashTable: public HashTable<ObjectHashTableShape, JSObject*> {
public:
static inline ObjectHashTable* cast(Object* obj) {
ASSERT(obj->IsHashTable());
@@ -2953,16 +3000,16 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape, JSReceiver*> {
// Looks up the value associated with the given key. The undefined value is
// returned in case the key is not present.
- Object* Lookup(JSReceiver* key);
+ Object* Lookup(JSObject* key);
// Adds (or overwrites) the value associated with the given key. Mapping a
// key to the undefined value causes removal of the whole entry.
- MUST_USE_RESULT MaybeObject* Put(JSReceiver* key, Object* value);
+ MUST_USE_RESULT MaybeObject* Put(JSObject* key, Object* value);
private:
friend class MarkCompactCollector;
- void AddEntry(int entry, JSReceiver* key, Object* value);
+ void AddEntry(int entry, JSObject* key, Object* value);
void RemoveEntry(int entry, Heap* heap);
inline void RemoveEntry(int entry);
@@ -3011,68 +3058,6 @@ class JSFunctionResultCache: public FixedArray {
};
-// This object provides quick access to scope info details for runtime
-// routines w/o the need to explicitly create a ScopeInfo object.
-class SerializedScopeInfo : public FixedArray {
- public :
- static SerializedScopeInfo* cast(Object* object) {
- ASSERT(object->IsSerializedScopeInfo());
- return reinterpret_cast<SerializedScopeInfo*>(object);
- }
-
- // Does this scope call eval?
- bool CallsEval();
-
- // Is this scope a strict mode scope?
- bool IsStrictMode();
-
- // Return the number of stack slots for code.
- int NumberOfStackSlots();
-
- // Return the number of context slots for code.
- int NumberOfContextSlots();
-
- // Return if this has context slots besides MIN_CONTEXT_SLOTS;
- bool HasHeapAllocatedLocals();
-
- // Lookup support for serialized scope info. Returns the
- // the stack slot index for a given slot name if the slot is
- // present; otherwise returns a value < 0. The name must be a symbol
- // (canonicalized).
- int StackSlotIndex(String* name);
-
- // Lookup support for serialized scope info. Returns the
- // context slot index for a given slot name if the slot is present; otherwise
- // returns a value < 0. The name must be a symbol (canonicalized).
- // If the slot is present and mode != NULL, sets *mode to the corresponding
- // mode for that variable.
- int ContextSlotIndex(String* name, VariableMode* mode);
-
- // Lookup support for serialized scope info. Returns the
- // parameter index for a given parameter name if the parameter is present;
- // otherwise returns a value < 0. The name must be a symbol (canonicalized).
- int ParameterIndex(String* name);
-
- // Lookup support for serialized scope info. Returns the
- // function context slot index if the function name is present (named
- // function expressions, only), otherwise returns a value < 0. The name
- // must be a symbol (canonicalized).
- int FunctionContextSlotIndex(String* name);
-
- static Handle<SerializedScopeInfo> Create(Scope* scope);
-
- // Serializes empty scope info.
- static SerializedScopeInfo* Empty();
-
- private:
- Object** ContextEntriesAddr();
-
- Object** ParameterEntriesAddr();
-
- Object** StackSlotEntriesAddr();
-};
-
-
// The cache for maps used by normalized (dictionary mode) objects.
// Such maps do not have property descriptors, so a typical program
// needs very limited number of distinct normalized maps.
@@ -3094,12 +3079,11 @@ class NormalizedMapCache: public FixedArray {
};
-// ByteArray represents fixed sized byte arrays. Used for the relocation info
-// that is attached to code objects.
+// ByteArray represents fixed sized byte arrays. Used by the outside world,
+// such as PCRE, and also by the memory allocator and garbage collector to
+// fill in free blocks in the heap.
class ByteArray: public FixedArrayBase {
public:
- inline int Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
-
// Setter and getter.
inline byte get(int index);
inline void set(int index, byte value);
@@ -3156,44 +3140,6 @@ class ByteArray: public FixedArrayBase {
};
-// FreeSpace represents fixed sized areas of the heap that are not currently in
-// use. Used by the heap and GC.
-class FreeSpace: public HeapObject {
- public:
- // [size]: size of the free space including the header.
- inline int size();
- inline void set_size(int value);
-
- inline int Size() { return size(); }
-
- // Casting.
- static inline FreeSpace* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
- inline void FreeSpacePrint() {
- FreeSpacePrint(stdout);
- }
- void FreeSpacePrint(FILE* out);
-#endif
-#ifdef DEBUG
- void FreeSpaceVerify();
-#endif
-
- // Layout description.
- // Size is smi tagged when it is stored.
- static const int kSizeOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kSizeOffset + kPointerSize;
-
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
-
- // Maximal size of a single FreeSpace.
- static const int kMaxSize = 512 * MB;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
-};
-
-
// An ExternalArray represents a fixed-size array of primitive values
// which live outside the JavaScript heap. Its subclasses are used to
// implement the CanvasArray types being defined in the WebGL
@@ -3727,11 +3673,6 @@ class Code: public HeapObject {
inline int major_key();
inline void set_major_key(int value);
- // For stubs, tells whether they should always exist, so that they can be
- // called from other stubs.
- inline bool is_pregenerated();
- inline void set_is_pregenerated(bool value);
-
// [optimizable]: For FUNCTION kind, tells if it is optimizable.
inline bool optimizable();
inline void set_optimizable(bool value);
@@ -3791,11 +3732,6 @@ class Code: public HeapObject {
inline byte to_boolean_state();
inline void set_to_boolean_state(byte value);
- // For kind STUB, major_key == CallFunction, tells whether there is
- // a function cache in the instruction stream.
- inline bool has_function_cache();
- inline void set_has_function_cache(bool flag);
-
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -3900,6 +3836,10 @@ class Code: public HeapObject {
void CodeVerify();
#endif
+ // Returns the isolate/heap this code object belongs to.
+ inline Isolate* isolate();
+ inline Heap* heap();
+
// Max loop nesting marker used to postpose OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
@@ -3935,7 +3875,6 @@ class Code: public HeapObject {
static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1;
- static const int kHasFunctionCacheOffset = kStubMajorKeyOffset + 1;
static const int kFullCodeFlags = kOptimizableOffset + 1;
class FullCodeFlagsHasDeoptimizationSupportField:
@@ -3955,10 +3894,9 @@ class Code: public HeapObject {
class KindField: public BitField<Kind, 7, 4> {};
class CacheHolderField: public BitField<InlineCacheHolderFlag, 11, 1> {};
class ExtraICStateField: public BitField<ExtraICState, 12, 2> {};
- class IsPregeneratedField: public BitField<bool, 14, 1> {};
// Signed field cannot be encoded using the BitField class.
- static const int kArgumentsCountShift = 15;
+ static const int kArgumentsCountShift = 14;
static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
static const int kFlagsNotUsedInLookup =
@@ -4094,12 +4032,8 @@ class Map: public HeapObject {
(bit_field2() & kElementsKindMask) >> kElementsKindShift);
}
- // Tells whether the instance has fast elements that are only Smis.
- inline bool has_fast_smi_only_elements() {
- return elements_kind() == FAST_SMI_ONLY_ELEMENTS;
- }
-
// Tells whether the instance has fast elements.
+ // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
inline bool has_fast_elements() {
return elements_kind() == FAST_ELEMENTS;
}
@@ -4108,10 +4042,6 @@ class Map: public HeapObject {
return elements_kind() == FAST_DOUBLE_ELEMENTS;
}
- inline bool has_non_strict_arguments_elements() {
- return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
- }
-
inline bool has_external_array_elements() {
ElementsKind kind(elements_kind());
return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
@@ -4170,7 +4100,6 @@ class Map: public HeapObject {
// 1 + 2 * i: prototype
// 2 + 2 * i: target map
DECL_ACCESSORS(prototype_transitions, FixedArray)
-
inline FixedArray* unchecked_prototype_transitions();
static const int kProtoTransitionHeaderSize = 1;
@@ -4180,14 +4109,14 @@ class Map: public HeapObject {
static const int kProtoTransitionMapOffset = 1;
inline int NumberOfProtoTransitions() {
- FixedArray* cache = prototype_transitions();
+ FixedArray* cache = unchecked_prototype_transitions();
if (cache->length() == 0) return 0;
return
Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
}
inline void SetNumberOfProtoTransitions(int value) {
- FixedArray* cache = prototype_transitions();
+ FixedArray* cache = unchecked_prototype_transitions();
ASSERT(cache->length() != 0);
cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
Smi::FromInt(value));
@@ -4209,6 +4138,27 @@ class Map: public HeapObject {
// instance descriptors.
MUST_USE_RESULT MaybeObject* CopyDropTransitions();
+ // Returns this map if it already has elements that are fast, otherwise
+ // returns a copy of the map, with all transitions dropped from the
+ // descriptors and the ElementsKind set to FAST_ELEMENTS.
+ MUST_USE_RESULT inline MaybeObject* GetFastElementsMap();
+
+ // Returns this map if it already has fast elements that are doubles,
+ // otherwise returns a copy of the map, with all transitions dropped from the
+ // descriptors and the ElementsKind set to FAST_DOUBLE_ELEMENTS.
+ MUST_USE_RESULT inline MaybeObject* GetFastDoubleElementsMap();
+
+ // Returns this map if already has dictionary elements, otherwise returns a
+ // copy of the map, with all transitions dropped from the descriptors and the
+ // ElementsKind set to DICTIONARY_ELEMENTS.
+ MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
+
+ // Returns a new map with all transitions dropped from the descriptors and the
+ // ElementsKind set.
+ MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
+ ElementsKind elements_kind,
+ bool safe_to_add_transition);
+
// Returns the property index for name (only valid for FAST MODE).
int PropertyIndexFor(String* name);
@@ -4247,8 +4197,6 @@ class Map: public HeapObject {
// This is undone in MarkCompactCollector::ClearNonLiveTransitions().
void CreateBackPointers();
- void CreateOneBackPointer(Map* transition_target);
-
// Set all map transitions from this map to dead maps to null.
// Also, restore the original prototype on the targets of these
// transitions, so that we do not process this map again while
@@ -4270,24 +4218,6 @@ class Map: public HeapObject {
return EquivalentToForNormalization(other, KEEP_INOBJECT_PROPERTIES);
}
- // Returns the contents of this map's descriptor array for the given string.
- // May return NULL. |safe_to_add_transition| is set to false and NULL
- // is returned if adding transitions is not allowed.
- Object* GetDescriptorContents(String* sentinel_name,
- bool* safe_to_add_transitions);
-
- // Returns the map that this map transitions to if its elements_kind
- // is changed to |elements_kind|, or NULL if no such map is cached yet.
- // |safe_to_add_transitions| is set to false if adding transitions is not
- // allowed.
- Map* LookupElementsTransitionMap(ElementsKind elements_kind,
- bool* safe_to_add_transition);
-
- // Adds an entry to this map's descriptor array for a transition to
- // |transitioned_map| when its elements_kind is changed to |elements_kind|.
- MaybeObject* AddElementsTransition(ElementsKind elements_kind,
- Map* transitioned_map);
-
// Dispatched behavior.
#ifdef OBJECT_PRINT
inline void MapPrint() {
@@ -4303,6 +4233,10 @@ class Map: public HeapObject {
inline int visitor_id();
inline void set_visitor_id(int visitor_id);
+ // Returns the isolate/heap this map belongs to.
+ inline Isolate* isolate();
+ inline Heap* heap();
+
typedef void (*TraverseCallback)(Map* map, void* data);
void TraverseTransitionTree(TraverseCallback callback, void* data);
@@ -4339,7 +4273,7 @@ class Map: public HeapObject {
static const int kSize = MAP_POINTER_ALIGN(kPadStart);
// Layout of pointer fields. Heap iteration code relies on them
- // being continuously allocated.
+  // being continuously allocated.
static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
static const int kPointerFieldsEndOffset =
Map::kPrototypeTransitionsOffset + kPointerSize;
@@ -4379,7 +4313,7 @@ class Map: public HeapObject {
static const int kStringWrapperSafeForDefaultValueOf = 2;
static const int kAttachedToSharedFunctionInfo = 3;
// No bits can be used after kElementsKindFirstBit, they are all reserved for
- // storing ElementKind.
+  // storing the ElementsKind.
static const int kElementsKindShift = 4;
static const int kElementsKindBitCount = 4;
@@ -4388,9 +4322,6 @@ class Map: public HeapObject {
((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
(FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
- static const int8_t kMaximumBitField2FastSmiOnlyElementValue =
- static_cast<int8_t>((FAST_SMI_ONLY_ELEMENTS + 1) <<
- Map::kElementsKindShift) - 1;
// Bit positions for bit field 3
static const int kIsShared = 0;
@@ -4405,7 +4336,6 @@ class Map: public HeapObject {
kSize> BodyDescriptor;
private:
- String* elements_transition_sentinel_name();
DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
};
@@ -5297,6 +5227,8 @@ class GlobalObject: public JSObject {
static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize;
private:
+ friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
};
@@ -6294,9 +6226,6 @@ class SeqString: public String {
// Casting.
static inline SeqString* cast(Object* obj);
- // Layout description.
- static const int kHeaderSize = String::kSize;
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
};
@@ -6330,8 +6259,12 @@ class SeqAsciiString: public SeqString {
return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
}
+ // Layout description.
+ static const int kHeaderSize = String::kSize;
+ static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
+
// Maximal memory usage for a single sequential ASCII string.
- static const int kMaxSize = 512 * MB - 1;
+ static const int kMaxSize = 512 * MB;
// Maximal length of a single sequential ASCII string.
// Q.v. String::kMaxLength which is the maximal size of concatenated strings.
static const int kMaxLength = (kMaxSize - kHeaderSize);
@@ -6380,8 +6313,12 @@ class SeqTwoByteString: public SeqString {
return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
}
+ // Layout description.
+ static const int kHeaderSize = String::kSize;
+ static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
+
// Maximal memory usage for a single sequential two-byte string.
- static const int kMaxSize = 512 * MB - 1;
+ static const int kMaxSize = 512 * MB;
// Maximal length of a single sequential two-byte string.
// Q.v. String::kMaxLength which is the maximal size of concatenated strings.
static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
@@ -6543,8 +6480,8 @@ class ExternalAsciiString: public ExternalString {
typedef v8::String::ExternalAsciiStringResource Resource;
// The underlying resource.
- inline const Resource* resource();
- inline void set_resource(const Resource* buffer);
+ inline Resource* resource();
+ inline void set_resource(Resource* buffer);
// Dispatched behavior.
uint16_t ExternalAsciiStringGet(int index);
@@ -6580,8 +6517,8 @@ class ExternalTwoByteString: public ExternalString {
typedef v8::String::ExternalStringResource Resource;
// The underlying string resource.
- inline const Resource* resource();
- inline void set_resource(const Resource* buffer);
+ inline Resource* resource();
+ inline void set_resource(Resource* buffer);
// Dispatched behavior.
uint16_t ExternalTwoByteStringGet(int index);
@@ -6732,9 +6669,6 @@ class Oddball: public HeapObject {
static const byte kUndefined = 5;
static const byte kOther = 6;
- // The ToNumber value of a hidden oddball is a negative smi.
- static const int kLeastHiddenOddballNumber = -5;
-
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
kSize> BodyDescriptor;
@@ -6770,6 +6704,10 @@ class JSGlobalPropertyCell: public HeapObject {
kValueOffset + kPointerSize,
kSize> BodyDescriptor;
+ // Returns the isolate/heap this cell object belongs to.
+ inline Isolate* isolate();
+ inline Heap* heap();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
};
@@ -6781,56 +6719,25 @@ class JSProxy: public JSReceiver {
// [handler]: The handler property.
DECL_ACCESSORS(handler, Object)
- // [hash]: The hash code property (undefined if not initialized yet).
- DECL_ACCESSORS(hash, Object)
-
// Casting.
static inline JSProxy* cast(Object* obj);
bool HasPropertyWithHandler(String* name);
- bool HasElementWithHandler(uint32_t index);
-
- MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
- Object* receiver,
- String* name);
- MUST_USE_RESULT MaybeObject* GetElementWithHandler(
- Object* receiver,
- uint32_t index);
MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
String* name,
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithHandler(
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode);
-
- // If the handler defines an accessor property, invoke its setter
- // (or throw if only a getter exists) and set *found to true. Otherwise false.
- MUST_USE_RESULT MaybeObject* SetPropertyWithHandlerIfDefiningSetter(
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool* found);
MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
String* name,
DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElementWithHandler(
- uint32_t index,
- DeleteMode mode);
MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
JSReceiver* receiver,
- String* name);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler(
- JSReceiver* receiver,
- uint32_t index);
-
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+ String* name,
+ bool* has_exception);
// Turn this into an (empty) JSObject.
void Fix();
@@ -6838,13 +6745,6 @@ class JSProxy: public JSReceiver {
// Initializes the body after the handler slot.
inline void InitializeBody(int object_size, Object* value);
- // Invoke a trap by name. If the trap does not exist on this's handler,
- // but derived_trap is non-NULL, invoke that instead. May cause GC.
- Handle<Object> CallTrap(const char* name,
- Handle<Object> derived_trap,
- int argc,
- Handle<Object> args[]);
-
// Dispatched behavior.
#ifdef OBJECT_PRINT
inline void JSProxyPrint() {
@@ -6860,8 +6760,7 @@ class JSProxy: public JSReceiver {
// size as a virgin JSObject. This is essential for becoming a JSObject
// upon freeze.
static const int kHandlerOffset = HeapObject::kHeaderSize;
- static const int kHashOffset = kHandlerOffset + kPointerSize;
- static const int kPaddingOffset = kHashOffset + kPointerSize;
+ static const int kPaddingOffset = kHandlerOffset + kPointerSize;
static const int kSize = JSObject::kHeaderSize;
static const int kHeaderSize = kPaddingOffset;
static const int kPaddingSize = kSize - kPaddingOffset;
@@ -6869,7 +6768,7 @@ class JSProxy: public JSReceiver {
STATIC_CHECK(kPaddingSize >= 0);
typedef FixedBodyDescriptor<kHandlerOffset,
- kPaddingOffset,
+ kHandlerOffset + kPointerSize,
kSize> BodyDescriptor;
private:
@@ -6900,7 +6799,7 @@ class JSFunctionProxy: public JSProxy {
#endif
// Layout description.
- static const int kCallTrapOffset = JSProxy::kPaddingOffset;
+ static const int kCallTrapOffset = kHandlerOffset + kPointerSize;
static const int kConstructTrapOffset = kCallTrapOffset + kPointerSize;
static const int kPaddingOffset = kConstructTrapOffset + kPointerSize;
static const int kSize = JSFunction::kSize;
@@ -6921,7 +6820,7 @@ class JSFunctionProxy: public JSProxy {
class JSWeakMap: public JSObject {
public:
// [table]: the backing hash table mapping keys to values.
- DECL_ACCESSORS(table, Object)
+ DECL_ACCESSORS(table, ObjectHashTable)
// [next]: linked list of encountered weak maps during GC.
DECL_ACCESSORS(next, Object)
@@ -7014,7 +6913,7 @@ class JSArray: public JSObject {
MUST_USE_RESULT MaybeObject* Initialize(int capacity);
// Set the content of the array to the content of storage.
- inline MaybeObject* SetContent(FixedArray* storage);
+ inline void SetContent(FixedArray* storage);
// Casting.
static inline JSArray* cast(Object* obj);
@@ -7230,6 +7129,7 @@ class TemplateInfo: public Struct {
static const int kPropertyListOffset = kTagOffset + kPointerSize;
static const int kHeaderSize = kPropertyListOffset + kPointerSize;
protected:
+ friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
};
@@ -7533,9 +7433,6 @@ class ObjectVisitor BASE_EMBEDDED {
// Handy shorthand for visiting a single pointer.
virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
- // Visit pointer embedded into a code object.
- virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
-
// Visits a contiguous arrays of external references (references to the C++
// heap) in the half-open range [start, end). Any or all of the values
// may be modified on return.
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index fb94a1a60b..f9500c405b 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -587,7 +587,7 @@ Parser::Parser(Handle<Script> script,
fni_(NULL),
stack_overflow_(false),
parenthesized_function_(false),
- harmony_scoping_(false) {
+ harmony_block_scoping_(false) {
AstNode::ResetIds();
}
@@ -650,7 +650,7 @@ FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
}
- if (ok && harmony_scoping_) {
+ if (ok && harmony_block_scoping_) {
CheckConflictingVarDeclarations(scope, &ok);
}
@@ -817,9 +817,9 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
isolate()->Throw(*result, &location);
}
-void Parser::SetHarmonyScoping(bool block_scoping) {
- scanner().SetHarmonyScoping(block_scoping);
- harmony_scoping_ = block_scoping;
+void Parser::SetHarmonyBlockScoping(bool block_scoping) {
+ scanner().SetHarmonyBlockScoping(block_scoping);
+ harmony_block_scoping_ = block_scoping;
}
// Base class containing common code for the different finder classes used by
@@ -957,18 +957,17 @@ class InitializationBlockFinder : public ParserFinder {
};
-// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form
+// A ThisNamedPropertyAssigmentFinder finds and marks statements of the form
// this.x = ...;, where x is a named property. It also determines whether a
// function contains only assignments of this type.
-class ThisNamedPropertyAssignmentFinder : public ParserFinder {
+class ThisNamedPropertyAssigmentFinder : public ParserFinder {
public:
- explicit ThisNamedPropertyAssignmentFinder(Isolate* isolate)
+ explicit ThisNamedPropertyAssigmentFinder(Isolate* isolate)
: isolate_(isolate),
only_simple_this_property_assignments_(true),
- names_(0),
- assigned_arguments_(0),
- assigned_constants_(0) {
- }
+ names_(NULL),
+ assigned_arguments_(NULL),
+ assigned_constants_(NULL) {}
void Update(Scope* scope, Statement* stat) {
// Bail out if function already has property assignment that are
@@ -995,17 +994,19 @@ class ThisNamedPropertyAssignmentFinder : public ParserFinder {
// Returns a fixed array containing three elements for each assignment of the
// form this.x = y;
Handle<FixedArray> GetThisPropertyAssignments() {
- if (names_.is_empty()) {
+ if (names_ == NULL) {
return isolate_->factory()->empty_fixed_array();
}
- ASSERT_EQ(names_.length(), assigned_arguments_.length());
- ASSERT_EQ(names_.length(), assigned_constants_.length());
+ ASSERT(names_ != NULL);
+ ASSERT(assigned_arguments_ != NULL);
+ ASSERT_EQ(names_->length(), assigned_arguments_->length());
+ ASSERT_EQ(names_->length(), assigned_constants_->length());
Handle<FixedArray> assignments =
- isolate_->factory()->NewFixedArray(names_.length() * 3);
- for (int i = 0; i < names_.length(); ++i) {
- assignments->set(i * 3, *names_[i]);
- assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_[i]));
- assignments->set(i * 3 + 2, *assigned_constants_[i]);
+ isolate_->factory()->NewFixedArray(names_->length() * 3);
+ for (int i = 0; i < names_->length(); i++) {
+ assignments->set(i * 3, *names_->at(i));
+ assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_->at(i)));
+ assignments->set(i * 3 + 2, *assigned_constants_->at(i));
}
return assignments;
}
@@ -1062,37 +1063,18 @@ class ThisNamedPropertyAssignmentFinder : public ParserFinder {
AssignmentFromSomethingElse();
}
-
-
-
- // We will potentially reorder the property assignments, so they must be
- // simple enough that the ordering does not matter.
void AssignmentFromParameter(Handle<String> name, int index) {
- EnsureInitialized();
- for (int i = 0; i < names_.length(); ++i) {
- if (name->Equals(*names_[i])) {
- assigned_arguments_[i] = index;
- assigned_constants_[i] = isolate_->factory()->undefined_value();
- return;
- }
- }
- names_.Add(name);
- assigned_arguments_.Add(index);
- assigned_constants_.Add(isolate_->factory()->undefined_value());
+ EnsureAllocation();
+ names_->Add(name);
+ assigned_arguments_->Add(index);
+ assigned_constants_->Add(isolate_->factory()->undefined_value());
}
void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
- EnsureInitialized();
- for (int i = 0; i < names_.length(); ++i) {
- if (name->Equals(*names_[i])) {
- assigned_arguments_[i] = -1;
- assigned_constants_[i] = value;
- return;
- }
- }
- names_.Add(name);
- assigned_arguments_.Add(-1);
- assigned_constants_.Add(value);
+ EnsureAllocation();
+ names_->Add(name);
+ assigned_arguments_->Add(-1);
+ assigned_constants_->Add(value);
}
void AssignmentFromSomethingElse() {
@@ -1100,36 +1082,35 @@ class ThisNamedPropertyAssignmentFinder : public ParserFinder {
only_simple_this_property_assignments_ = false;
}
- void EnsureInitialized() {
- if (names_.capacity() == 0) {
- ASSERT(assigned_arguments_.capacity() == 0);
- ASSERT(assigned_constants_.capacity() == 0);
- names_.Initialize(4);
- assigned_arguments_.Initialize(4);
- assigned_constants_.Initialize(4);
+ void EnsureAllocation() {
+ if (names_ == NULL) {
+ ASSERT(assigned_arguments_ == NULL);
+ ASSERT(assigned_constants_ == NULL);
+ Zone* zone = isolate_->zone();
+ names_ = new(zone) ZoneStringList(4);
+ assigned_arguments_ = new(zone) ZoneList<int>(4);
+ assigned_constants_ = new(zone) ZoneObjectList(4);
}
}
Isolate* isolate_;
bool only_simple_this_property_assignments_;
- ZoneStringList names_;
- ZoneList<int> assigned_arguments_;
- ZoneObjectList assigned_constants_;
+ ZoneStringList* names_;
+ ZoneList<int>* assigned_arguments_;
+ ZoneObjectList* assigned_constants_;
};
Statement* Parser::ParseSourceElement(ZoneStringList* labels,
bool* ok) {
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
- //
- // In harmony mode we allow additionally the following productions
- // SourceElement:
- // LetDeclaration
-
if (peek() == Token::FUNCTION) {
+ // FunctionDeclaration is only allowed in the context of SourceElements
+ // (Ecma 262 5th Edition, clause 14):
+ // SourceElement:
+ // Statement
+ // FunctionDeclaration
+ // Common language extension is to allow function declaration in place
+ // of any statement. This language extension is disabled in strict mode.
return ParseFunctionDeclaration(ok);
} else if (peek() == Token::LET) {
return ParseVariableStatement(kSourceElement, ok);
@@ -1143,7 +1124,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
bool* ok) {
// SourceElements ::
- // (SourceElement)* <end_token>
+ // (Statement)* <end_token>
// Allocate a target stack to use for this set of source
// elements. This way, all scripts and functions get their own
@@ -1153,7 +1134,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
ASSERT(processor != NULL);
InitializationBlockFinder block_finder(top_scope_, target_stack_);
- ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate());
+ ThisNamedPropertyAssigmentFinder this_property_assignment_finder(isolate());
bool directive_prologue = true; // Parsing directive prologue.
while (peek() != end_token) {
@@ -1314,13 +1295,8 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
}
case Token::FUNCTION: {
- // FunctionDeclaration is only allowed in the context of SourceElements
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
- // Common language extension is to allow function declaration in place
- // of any statement. This language extension is disabled in strict mode.
+ // In strict mode, FunctionDeclaration is only allowed in the context
+ // of SourceElements.
if (top_scope_->is_strict_mode()) {
ReportMessageAt(scanner().peek_location(), "strict_function",
Vector<const char*>::empty());
@@ -1345,7 +1321,7 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
VariableProxy* Parser::Declare(Handle<String> name,
- VariableMode mode,
+ Variable::Mode mode,
FunctionLiteral* fun,
bool resolve,
bool* ok) {
@@ -1363,7 +1339,7 @@ VariableProxy* Parser::Declare(Handle<String> name,
// Similarly, strict mode eval scope does not leak variable declarations to
// the caller's scope so we declare all locals, too.
- Scope* declaration_scope = mode == LET ? top_scope_
+ Scope* declaration_scope = mode == Variable::LET ? top_scope_
: top_scope_->DeclarationScope();
if (declaration_scope->is_function_scope() ||
declaration_scope->is_strict_mode_eval_scope() ||
@@ -1385,12 +1361,12 @@ VariableProxy* Parser::Declare(Handle<String> name,
//
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
- if ((mode != VAR) || (var->mode() != VAR)) {
+ if ((mode != Variable::VAR) || (var->mode() != Variable::VAR)) {
// We only have vars, consts and lets in declarations.
- ASSERT(var->mode() == VAR ||
- var->mode() == CONST ||
- var->mode() == LET);
- if (harmony_scoping_) {
+ ASSERT(var->mode() == Variable::VAR ||
+ var->mode() == Variable::CONST ||
+ var->mode() == Variable::LET);
+ if (harmony_block_scoping_) {
// In harmony mode we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
@@ -1400,8 +1376,8 @@ VariableProxy* Parser::Declare(Handle<String> name,
*ok = false;
return NULL;
}
- const char* type = (var->mode() == VAR) ? "var" :
- (var->mode() == CONST) ? "const" : "let";
+ const char* type = (var->mode() == Variable::VAR) ? "var" :
+ (var->mode() == Variable::CONST) ? "const" : "let";
Handle<String> type_string =
isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
Expression* expression =
@@ -1434,10 +1410,14 @@ VariableProxy* Parser::Declare(Handle<String> name,
new(zone()) Declaration(proxy, mode, fun, top_scope_));
// For global const variables we bind the proxy to a variable.
- if (mode == CONST && declaration_scope->is_global_scope()) {
+ if (mode == Variable::CONST && declaration_scope->is_global_scope()) {
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
- var = new(zone()) Variable(declaration_scope, name, CONST, true, kind);
+ var = new(zone()) Variable(declaration_scope,
+ name,
+ Variable::CONST,
+ true,
+ kind);
}
// If requested and we have a local variable, bind the proxy to the variable
@@ -1520,7 +1500,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// other functions are setup when entering the surrounding scope.
SharedFunctionInfoLiteral* lit =
new(zone()) SharedFunctionInfoLiteral(isolate(), shared);
- VariableProxy* var = Declare(name, VAR, NULL, true, CHECK_OK);
+ VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
return new(zone()) ExpressionStatement(new(zone()) Assignment(
isolate(), Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
}
@@ -1542,14 +1522,14 @@ Statement* Parser::ParseFunctionDeclaration(bool* ok) {
// Even if we're not at the top-level of the global or a function
// scope, we treat is as such and introduce the function with it's
// initial value upon entering the corresponding scope.
- VariableMode mode = harmony_scoping_ ? LET : VAR;
+ Variable::Mode mode = harmony_block_scoping_ ? Variable::LET : Variable::VAR;
Declare(name, mode, fun, true, CHECK_OK);
return EmptyStatement();
}
Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
- if (harmony_scoping_) return ParseScopedBlock(labels, ok);
+ if (harmony_block_scoping_) return ParseScopedBlock(labels, ok);
// Block ::
// '{' Statement* '}'
@@ -1575,11 +1555,6 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
- // The harmony mode uses source elements instead of statements.
- //
- // Block ::
- // '{' SourceElement* '}'
-
// Construct block expecting 16 statements.
Block* body = new(zone()) Block(isolate(), labels, 16, false);
Scope* saved_scope = top_scope_;
@@ -1647,7 +1622,7 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
// VariableDeclarations ::
// ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
- VariableMode mode = VAR;
+ Variable::Mode mode = Variable::VAR;
// True if the binding needs initialization. 'let' and 'const' declared
// bindings are created uninitialized by their declaration nodes and
// need initialization. 'var' declared bindings are always initialized
@@ -1664,7 +1639,7 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
*ok = false;
return NULL;
}
- mode = CONST;
+ mode = Variable::CONST;
is_const = true;
needs_init = true;
init_op = Token::INIT_CONST;
@@ -1677,14 +1652,14 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
*ok = false;
return NULL;
}
- mode = LET;
+ mode = Variable::LET;
needs_init = true;
init_op = Token::INIT_LET;
} else {
UNREACHABLE(); // by current callers
}
- Scope* declaration_scope = (mode == LET)
+ Scope* declaration_scope = mode == Variable::LET
? top_scope_ : top_scope_->DeclarationScope();
// The scope of a var/const declared variable anywhere inside a function
// is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
@@ -1875,7 +1850,7 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
// as the declaration. Thus dynamic lookups are unnecessary even if the
// block scope is inside a with.
if (value != NULL) {
- bool in_with = (mode == VAR) ? inside_with() : false;
+ bool in_with = mode == Variable::VAR ? inside_with() : false;
VariableProxy* proxy =
initialization_scope->NewUnresolved(name, in_with);
Assignment* assignment =
@@ -2249,7 +2224,8 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (top_scope_->is_strict_mode()) {
catch_scope->EnableStrictMode();
}
- VariableMode mode = harmony_scoping_ ? LET : VAR;
+ Variable::Mode mode = harmony_block_scoping_
+ ? Variable::LET : Variable::VAR;
catch_variable = catch_scope->DeclareLocal(name, mode);
Scope* saved_scope = top_scope_;
@@ -2642,7 +2618,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
default: break;
}
- x = new(zone()) CompareOperation(isolate(), cmp, x, y, position);
+ x = NewCompareNode(cmp, x, y, position);
if (cmp != op) {
// The comparison was negated - add a NOT.
x = new(zone()) UnaryOperation(isolate(), Token::NOT, x, position);
@@ -2658,6 +2634,27 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
}
+Expression* Parser::NewCompareNode(Token::Value op,
+ Expression* x,
+ Expression* y,
+ int position) {
+ ASSERT(op != Token::NE && op != Token::NE_STRICT);
+ if (op == Token::EQ || op == Token::EQ_STRICT) {
+ bool is_strict = (op == Token::EQ_STRICT);
+ Literal* x_literal = x->AsLiteral();
+ if (x_literal != NULL && x_literal->IsNull()) {
+ return new(zone()) CompareToNull(isolate(), is_strict, y);
+ }
+
+ Literal* y_literal = y->AsLiteral();
+ if (y_literal != NULL && y_literal->IsNull()) {
+ return new(zone()) CompareToNull(isolate(), is_strict, x);
+ }
+ }
+ return new(zone()) CompareOperation(isolate(), op, x, y, position);
+}
+
+
Expression* Parser::ParseUnaryExpression(bool* ok) {
// UnaryExpression ::
// PostfixExpression
@@ -3714,7 +3711,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// Function declarations are function scoped in normal mode, so they are
// hoisted. In harmony block scoping mode they are block scoped, so they
// are not hoisted.
- Scope* scope = (type == FunctionLiteral::DECLARATION && !harmony_scoping_)
+ Scope* scope = (type == FunctionLiteral::DECLARATION &&
+ !harmony_block_scoping_)
? NewScope(top_scope_->DeclarationScope(), Scope::FUNCTION_SCOPE, false)
: NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8);
@@ -3756,7 +3754,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
reserved_loc = scanner().location();
}
- top_scope_->DeclareParameter(param_name, harmony_scoping_ ? LET : VAR);
+ top_scope_->DeclareParameter(param_name,
+ harmony_block_scoping_
+ ? Variable::LET
+ : Variable::VAR);
num_parameters++;
if (num_parameters > kMaxNumFunctionParameters) {
ReportMessageAt(scanner().location(), "too_many_parameters",
@@ -3883,7 +3884,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
}
}
- if (harmony_scoping_) {
+ if (harmony_block_scoping_) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
@@ -5121,10 +5122,10 @@ int ScriptDataImpl::ReadNumber(byte** source) {
static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
bool allow_lazy,
ParserRecorder* recorder,
- bool harmony_scoping) {
+ bool harmony_block_scoping) {
Isolate* isolate = Isolate::Current();
JavaScriptScanner scanner(isolate->unicode_cache());
- scanner.SetHarmonyScoping(harmony_scoping);
+ scanner.SetHarmonyBlockScoping(harmony_block_scoping);
scanner.Initialize(source);
intptr_t stack_limit = isolate->stack_guard()->real_climit();
if (!preparser::PreParser::PreParseProgram(&scanner,
@@ -5146,7 +5147,7 @@ static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
// even if the preparser data is only used once.
ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source,
v8::Extension* extension,
- bool harmony_scoping) {
+ bool harmony_block_scoping) {
bool allow_lazy = FLAG_lazy && (extension == NULL);
if (!allow_lazy) {
// Partial preparsing is only about lazily compiled functions.
@@ -5154,17 +5155,17 @@ ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source,
return NULL;
}
PartialParserRecorder recorder;
- return DoPreParse(source, allow_lazy, &recorder, harmony_scoping);
+ return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
}
ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source,
v8::Extension* extension,
- bool harmony_scoping) {
+ bool harmony_block_scoping) {
Handle<Script> no_script;
bool allow_lazy = FLAG_lazy && (extension == NULL);
CompleteParserRecorder recorder;
- return DoPreParse(source, allow_lazy, &recorder, harmony_scoping);
+ return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
}
@@ -5194,21 +5195,25 @@ bool ParserApi::Parse(CompilationInfo* info) {
ASSERT(info->function() == NULL);
FunctionLiteral* result = NULL;
Handle<Script> script = info->script();
- bool harmony_scoping = !info->is_native() && FLAG_harmony_scoping;
+ bool harmony_block_scoping = !info->is_native() &&
+ FLAG_harmony_block_scoping;
if (info->is_lazy()) {
- Parser parser(script, true, NULL, NULL);
- parser.SetHarmonyScoping(harmony_scoping);
+ bool allow_natives_syntax =
+ FLAG_allow_natives_syntax ||
+ info->is_native();
+ Parser parser(script, allow_natives_syntax, NULL, NULL);
+ parser.SetHarmonyBlockScoping(harmony_block_scoping);
result = parser.ParseLazy(info);
} else {
// Whether we allow %identifier(..) syntax.
bool allow_natives_syntax =
- info->allows_natives_syntax() || FLAG_allow_natives_syntax;
+ info->is_native() || FLAG_allow_natives_syntax;
ScriptDataImpl* pre_data = info->pre_parse_data();
Parser parser(script,
allow_natives_syntax,
info->extension(),
pre_data);
- parser.SetHarmonyScoping(harmony_scoping);
+ parser.SetHarmonyBlockScoping(harmony_block_scoping);
if (pre_data != NULL && pre_data->has_error()) {
Scanner::Location loc = pre_data->MessageLocation();
const char* message = pre_data->BuildMessage();
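
As an aside, the NewCompareNode helper restored in parser.cc above folds an equality test against a literal null into a dedicated CompareToNull AST node instead of a generic CompareOperation. The standalone C++ sketch below mirrors only that decision logic; the Expr/Node types and the Op enum are toy stand-ins for V8's AST classes, not the real ones.

    #include <iostream>
    #include <string>

    struct Expr {
      std::string text;
      bool is_null_literal;
    };

    struct Node {
      std::string kind;     // "CompareToNull" or "CompareOperation"
      bool is_strict;       // true for ===/!== against null
      const Expr* operand;  // the non-null side of the comparison
    };

    enum class Op { kEq, kEqStrict, kLt };

    // Mirrors the shape of Parser::NewCompareNode: fold x == null / x === null
    // (on either side) into a CompareToNull node, otherwise keep the generic form.
    Node NewCompareNode(Op op, const Expr& x, const Expr& y) {
      if (op == Op::kEq || op == Op::kEqStrict) {
        bool is_strict = (op == Op::kEqStrict);
        if (x.is_null_literal) return Node{"CompareToNull", is_strict, &y};
        if (y.is_null_literal) return Node{"CompareToNull", is_strict, &x};
      }
      return Node{"CompareOperation", false, &x};
    }

    int main() {
      Expr a{"a", false};
      Expr null_lit{"null", true};
      Node n = NewCompareNode(Op::kEqStrict, a, null_lit);
      // Prints: CompareToNull strict=1 operand=a
      std::cout << n.kind << " strict=" << n.is_strict
                << " operand=" << n.operand->text << "\n";
      return 0;
    }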
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 359bb38482..3312f2f56a 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -164,13 +164,13 @@ class ParserApi {
// Generic preparser generating full preparse data.
static ScriptDataImpl* PreParse(UC16CharacterStream* source,
v8::Extension* extension,
- bool harmony_scoping);
+ bool harmony_block_scoping);
// Preparser that only does preprocessing that makes sense if only used
// immediately after.
static ScriptDataImpl* PartialPreParse(UC16CharacterStream* source,
v8::Extension* extension,
- bool harmony_scoping);
+ bool harmony_block_scoping);
};
// ----------------------------------------------------------------------------
@@ -436,7 +436,7 @@ class Parser {
void ReportMessageAt(Scanner::Location loc,
const char* message,
Vector<Handle<String> > args);
- void SetHarmonyScoping(bool block_scoping);
+ void SetHarmonyBlockScoping(bool block_scoping);
private:
// Limit on number of function parameters is chosen arbitrarily.
@@ -533,6 +533,11 @@ class Parser {
ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
+ Expression* NewCompareNode(Token::Value op,
+ Expression* x,
+ Expression* y,
+ int position);
+
// Populate the constant properties fixed array for a materialized object
// literal.
void BuildObjectLiteralConstantProperties(
@@ -651,7 +656,7 @@ class Parser {
void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
// Parser support
- VariableProxy* Declare(Handle<String> name, VariableMode mode,
+ VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
FunctionLiteral* fun,
bool resolve,
bool* ok);
@@ -731,7 +736,7 @@ class Parser {
// Heuristically that means that the function will be called immediately,
// so never lazily compile it.
bool parenthesized_function_;
- bool harmony_scoping_;
+ bool harmony_block_scoping_;
friend class LexicalScope;
};
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 20bd837931..685ec3c786 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -333,126 +333,44 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
+ address_ = mmap(NULL, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
size_ = size;
}
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- Address base = static_cast<Address>(reservation);
- Address aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
+ if (0 == munmap(address(), size())) address_ = MAP_FAILED;
}
}
bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
+ return address_ != MAP_FAILED;
}
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
+ kMmapFd, kMmapFdOffset)) {
return false;
}
- UpdateAllocatedSpaceLimits(base, size);
+ UpdateAllocatedSpaceLimits(address, size);
return true;
}
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 90f45dd163..b152dae9a6 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -78,6 +78,30 @@ double ceiling(double x) {
static Mutex* limit_mutex = NULL;
+static void* GetRandomMmapAddr() {
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ // Note that the current isolate isn't set up in a call path via
+ // CpuFeatures::Probe. We don't care about randomization in this case because
+ // the code page is immediately freed.
+ if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+ uint64_t rnd1 = V8::RandomPrivate(isolate);
+ uint64_t rnd2 = V8::RandomPrivate(isolate);
+ uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+ uint32_t raw_addr = V8::RandomPrivate(isolate);
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc).
+ raw_addr &= 0x3ffff000;
+ raw_addr += 0x20000000;
+#endif
+ return reinterpret_cast<void*>(raw_addr);
+ }
+ return NULL;
+}
+
+
void OS::Setup() {
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = Ticks() ^ (getpid() << 16);
@@ -357,9 +381,9 @@ size_t OS::AllocateAlignment() {
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
+ const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* addr = OS::GetRandomMmapAddr();
+ void* addr = GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
LOG(i::Isolate::Current(),
@@ -429,12 +453,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
int size = ftell(file);
void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
@@ -449,18 +468,13 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
return NULL;
}
void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
+ if (memory_) munmap(memory_, size_);
fclose(file_);
}
@@ -539,14 +553,10 @@ void OS::SignalCodeMovingGC() {
// kernel log.
int size = sysconf(_SC_PAGESIZE);
FILE* f = fopen(kGCFakeMmap, "w+");
- void* addr = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_EXEC,
- MAP_PRIVATE,
- fileno(f),
- 0);
+ void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+ fileno(f), 0);
ASSERT(addr != MAP_FAILED);
- OS::Free(addr, size);
+ munmap(addr, size);
fclose(f);
}
@@ -588,126 +598,44 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
+ address_ = mmap(GetRandomMmapAddr(), size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
size_ = size;
}
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- Address base = static_cast<Address>(reservation);
- Address aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
+ if (0 == munmap(address(), size())) address_ = MAP_FAILED;
}
}
bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
+ return address_ != MAP_FAILED;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
+ if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
+ kMmapFd, kMmapFdOffset)) {
return false;
}
- UpdateAllocatedSpaceLimits(base, size);
+ UpdateAllocatedSpaceLimits(address, size);
return true;
}
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
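
The GetRandomMmapAddr helper reintroduced in platform-linux.cc above computes a page-aligned mmap hint inside the 0x20000000 - 0x60000000 range on 32-bit targets. The following standalone sketch reproduces just that arithmetic; rand() stands in for V8::RandomPrivate(isolate), which is internal to the engine and not available outside it.

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Illustrative only: compute a page-aligned hint address in a range that is
    // relatively unpopulated under common 32-bit ASLR layouts.
    static void* RandomMmapHint32() {
      uint32_t raw_addr = static_cast<uint32_t>(rand());
      raw_addr &= 0x3ffff000;   // keep it page-aligned, limit the span
      raw_addr += 0x20000000;   // shift into 0x20000000 - 0x60000000
      return reinterpret_cast<void*>(static_cast<uintptr_t>(raw_addr));
    }

    int main() {
      std::printf("mmap hint: %p\n", RandomMmapHint32());
      return 0;
    }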
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 6e5d29da2f..6be941a08b 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -94,8 +94,12 @@ static Mutex* limit_mutex = NULL;
void OS::Setup() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
+ // Seed the random number generator.
+ // Convert the current time to a 64-bit integer first, before converting it
+ // to an unsigned. Going directly will cause an overflow and the seed to be
+ // set to all ones. The seed will be identical for different instances that
+ // call this setup code within the same millisecond.
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
}
@@ -144,12 +148,9 @@ void* OS::Allocate(const size_t requested,
bool is_executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(OS::GetRandomMmapAddr(),
- msize,
- prot,
+ void* mbase = mmap(NULL, msize, prot,
MAP_PRIVATE | MAP_ANON,
- kMmapFd,
- kMmapFdOffset);
+ kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) {
LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
@@ -206,12 +207,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
int size = ftell(file);
void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
@@ -226,18 +222,13 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
return NULL;
}
void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
+ if (memory_) munmap(memory_, size_);
fclose(file_);
}
@@ -343,102 +334,33 @@ int OS::StackWalk(Vector<StackFrame> frames) {
}
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- Address base = static_cast<Address>(reservation);
- Address aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
+VirtualMemory::VirtualMemory(size_t size) {
+ address_ = mmap(NULL, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
+ size_ = size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
+ if (0 == munmap(address(), size())) address_ = MAP_FAILED;
}
}
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
bool VirtualMemory::IsReserved() {
- return address_ != NULL;
+ return address_ != MAP_FAILED;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::CommitRegion(void* address,
- size_t size,
- bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address,
- size,
- prot,
+ if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
+ kMmapFd, kMmapFdOffset)) {
return false;
}
@@ -448,22 +370,9 @@ bool VirtualMemory::CommitRegion(void* address,
bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::UncommitRegion(void* address, size_t size) {
- return mmap(address,
- size,
- PROT_NONE,
+ return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index 3151d18053..973329b9b1 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -245,7 +245,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
+ if (memory_) munmap(memory_, size_);
fclose(file_);
}
@@ -342,8 +342,7 @@ VirtualMemory::VirtualMemory(size_t size) {
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- OS::Free(address(), size());
- address_ = MAP_FAILED
+ if (0 == munmap(address(), size())) address_ = MAP_FAILED;
}
}
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 78fece3f1d..52cf02963a 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -84,34 +84,6 @@ void OS::Guard(void* address, const size_t size) {
#endif // __CYGWIN__
-void* OS::GetRandomMmapAddr() {
- Isolate* isolate = Isolate::UncheckedCurrent();
- // Note that the current isolate isn't set up in a call path via
- // CpuFeatures::Probe. We don't care about randomization in this case because
- // the code page is immediately freed.
- if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
- uint64_t rnd1 = V8::RandomPrivate(isolate);
- uint64_t rnd2 = V8::RandomPrivate(isolate);
- uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
- // Currently available CPUs have 48 bits of virtual addressing. Truncate
- // the hint address to 46 bits to give the kernel a fighting chance of
- // fulfilling our placement request.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
- uint32_t raw_addr = V8::RandomPrivate(isolate);
- // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
- // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
- // 10.6 and 10.7.
- raw_addr &= 0x3ffff000;
- raw_addr += 0x20000000;
-#endif
- return reinterpret_cast<void*>(raw_addr);
- }
- return NULL;
-}
-
-
// ----------------------------------------------------------------------------
// Math functions
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 7e0153f07a..035d394453 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -54,7 +54,6 @@
#include "platform.h"
#include "vm-state-inl.h"
-#include "v8threads.h"
// It seems there is a bug in some Solaris distributions (experienced in
@@ -84,33 +83,6 @@ namespace internal {
static const pthread_t kNoThread = (pthread_t) 0;
-static void* GetRandomMmapAddr() {
- Isolate* isolate = Isolate::UncheckedCurrent();
- // Note that the current isolate isn't set up in a call path via
- // CpuFeatures::Probe. We don't care about randomization in this case because
- // the code page is immediately freed.
- if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
- uint64_t rnd1 = V8::RandomPrivate(isolate);
- uint64_t rnd2 = V8::RandomPrivate(isolate);
- uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
- // Currently available CPUs have 48 bits of virtual addressing. Truncate
- // the hint address to 46 bits to give the kernel a fighting chance of
- // fulfilling our placement request.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
- uint32_t raw_addr = V8::RandomPrivate(isolate);
- // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
- // variety of ASLR modes (PAE kernel, NX compat mode, etc).
- raw_addr &= 0x3ffff000;
- raw_addr += 0x20000000;
-#endif
- return reinterpret_cast<void*>(raw_addr);
- }
- return NULL;
-}
-
-
double ceiling(double x) {
return ceil(x);
}
@@ -350,126 +322,43 @@ static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
+ address_ = mmap(NULL, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
size_ = size;
}
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- Address base = static_cast<Address>(reservation);
- Address aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
+ if (0 == munmap(address(), size())) address_ = MAP_FAILED;
}
}
bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
+ return address_ != MAP_FAILED;
}
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address, size, prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd, kMmapFdOffset)) {
return false;
}
- UpdateAllocatedSpaceLimits(base, size);
+ UpdateAllocatedSpaceLimits(address, size);
return true;
}
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 8771c43679..97788e2f66 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -1397,101 +1397,41 @@ void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
}
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size);
- if (address == NULL) return;
- Address base = RoundUp(static_cast<Address>(address), alignment);
- // Try reducing the size by freeing and then reallocating a specific area.
- bool result = ReleaseRegion(address, request_size);
- USE(result);
- ASSERT(result);
- address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
- if (address != NULL) {
- request_size = size;
- ASSERT(base == static_cast<Address>(address));
- } else {
- // Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size);
- if (address == NULL) return;
- }
- address_ = address;
- size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address_, size_);
- ASSERT(result);
- USE(result);
- }
-}
-
-
bool VirtualMemory::IsReserved() {
return address_ != NULL;
}
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
+VirtualMemory::VirtualMemory(size_t size) {
+ address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
+ size_ = size;
}
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- if (CommitRegion(address, size, is_executable)) {
- UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
- return true;
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
}
- return false;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- ASSERT(IsReserved());
- return UncommitRegion(address, size);
}
-void* VirtualMemory::ReserveRegion(size_t size) {
- return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+ if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
return false;
}
- UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
+ UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
return true;
}
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return VirtualFree(base, 0, MEM_RELEASE) != 0;
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ ASSERT(IsReserved());
+ return VirtualFree(address, size, MEM_DECOMMIT) != false;
}
-
// ----------------------------------------------------------------------------
// Win32 thread support.
@@ -1513,7 +1453,6 @@ class Thread::PlatformData : public Malloced {
public:
explicit PlatformData(HANDLE thread) : thread_(thread) {}
HANDLE thread_;
- unsigned thread_id_;
};
@@ -1557,15 +1496,13 @@ void Thread::Start() {
ThreadEntry,
this,
0,
- &data_->thread_id_));
+ NULL));
}
// Wait for thread to terminate.
void Thread::Join() {
- if (data_->thread_id_ != GetCurrentThreadId()) {
- WaitForSingleObject(data_->thread_, INFINITE);
- }
+ WaitForSingleObject(data_->thread_, INFINITE);
}
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index f84b6b17a5..034fe3404d 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -178,9 +178,6 @@ class OS {
// Assign memory as a guard page so that access will cause an exception.
static void Guard(void* address, const size_t size);
- // Generate a random address to be used for hinting mmap().
- static void* GetRandomMmapAddr();
-
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
@@ -304,46 +301,23 @@ class OS {
DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
-// Represents and controls an area of reserved memory.
-// Control of the reserved memory can be assigned to another VirtualMemory
-// object by assignment or copy-contructing. This removes the reserved memory
-// from the original object.
+
class VirtualMemory {
public:
- // Empty VirtualMemory object, controlling no reserved memory.
- VirtualMemory();
-
// Reserves virtual memory with size.
explicit VirtualMemory(size_t size);
-
- // Reserves virtual memory containing an area of the given size that
- // is aligned per alignment. This may not be at the position returned
- // by address().
- VirtualMemory(size_t size, size_t alignment);
-
- // Releases the reserved memory, if any, controlled by this VirtualMemory
- // object.
~VirtualMemory();
// Returns whether the memory has been reserved.
bool IsReserved();
- // Initialize or resets an embedded VirtualMemory object.
- void Reset();
-
// Returns the start address of the reserved memory.
- // If the memory was reserved with an alignment, this address is not
- // necessarily aligned. The user might need to round it up to a multiple of
- // the alignment to get the start of the aligned block.
void* address() {
ASSERT(IsReserved());
return address_;
}
- // Returns the size of the reserved memory. The returned value is only
- // meaningful when IsReserved() returns true.
- // If the memory was reserved with an alignment, this size may be larger
- // than the requested size.
+ // Returns the size of the reserved memory.
size_t size() { return size_; }
// Commits real memory. Returns whether the operation succeeded.
@@ -352,43 +326,11 @@ class VirtualMemory {
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
- void Release() {
- ASSERT(IsReserved());
- // Notice: Order is important here. The VirtualMemory object might live
- // inside the allocated region.
- void* address = address_;
- size_t size = size_;
- Reset();
- bool result = ReleaseRegion(address, size);
- USE(result);
- ASSERT(result);
- }
-
- // Assign control of the reserved region to a different VirtualMemory object.
- // The old object is no longer functional (IsReserved() returns false).
- void TakeControl(VirtualMemory* from) {
- ASSERT(!IsReserved());
- address_ = from->address_;
- size_ = from->size_;
- from->Reset();
- }
-
- static void* ReserveRegion(size_t size);
-
- static bool CommitRegion(void* base, size_t size, bool is_executable);
-
- static bool UncommitRegion(void* base, size_t size);
-
- // Must be called with a base pointer that has been returned by ReserveRegion
- // and the same size it was reserved with.
- static bool ReleaseRegion(void* base, size_t size);
-
private:
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
};
-
// ----------------------------------------------------------------------------
// Thread
//
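
The slimmed-down VirtualMemory interface above keeps only reserve-on-construction, IsReserved, Commit and Uncommit. A minimal standalone POSIX sketch of that reserve/commit/uncommit pattern follows, assuming plain mmap/munmap calls are acceptable stand-ins for the class itself; the flag combinations match the Unix implementations in this diff.

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>
    #include <cstring>

    int main() {
      const size_t size = 1 << 20;  // 1 MB reservation

      // Reserve: address space only, no committed backing pages yet.
      void* base = mmap(NULL, size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
      assert(base != MAP_FAILED);

      // Commit: remap the first 64 KB in place as read/write.
      const size_t chunk = 64 * 1024;
      void* committed = mmap(base, chunk, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
      assert(committed == base);
      std::memset(base, 0xAB, chunk);  // the pages are now usable

      // Uncommit: drop the pages again but keep the reservation.
      void* uncommitted = mmap(base, chunk, PROT_NONE,
                               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
                               -1, 0);
      assert(uncommitted != MAP_FAILED);

      // Release the whole reservation, as the destructor does.
      munmap(base, size);
      return 0;
    }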
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 9f8e1eecc2..47d21bac15 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -117,18 +117,7 @@ void PreParser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
- // (Ecma 262 5th Edition, clause 14):
- // SourceElement:
- // Statement
- // FunctionDeclaration
- //
- // In harmony mode we allow additionally the following productions
- // SourceElement:
- // LetDeclaration
-
switch (peek()) {
- case i::Token::FUNCTION:
- return ParseFunctionDeclaration(ok);
case i::Token::LET:
return ParseVariableStatement(kSourceElement, ok);
default:
@@ -236,19 +225,8 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
case i::Token::TRY:
return ParseTryStatement(ok);
- case i::Token::FUNCTION: {
- i::Scanner::Location start_location = scanner_->peek_location();
- Statement statement = ParseFunctionDeclaration(CHECK_OK);
- i::Scanner::Location end_location = scanner_->location();
- if (strict_mode()) {
- ReportMessageAt(start_location.beg_pos, end_location.end_pos,
- "strict_function", NULL);
- *ok = false;
- return Statement::Default();
- } else {
- return statement;
- }
- }
+ case i::Token::FUNCTION:
+ return ParseFunctionDeclaration(ok);
case i::Token::DEBUGGER:
return ParseDebuggerStatement(ok);
@@ -293,10 +271,14 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
//
Expect(i::Token::LBRACE, CHECK_OK);
while (peek() != i::Token::RBRACE) {
- if (harmony_scoping_) {
- ParseSourceElement(CHECK_OK);
- } else {
- ParseStatement(CHECK_OK);
+ i::Scanner::Location start_location = scanner_->peek_location();
+ Statement statement = ParseSourceElement(CHECK_OK);
+ i::Scanner::Location end_location = scanner_->location();
+ if (strict_mode() && statement.IsFunctionDeclaration()) {
+ ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+ "strict_function", NULL);
+ *ok = false;
+ return Statement::Default();
}
}
Expect(i::Token::RBRACE, ok);
@@ -390,11 +372,18 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
Expression expr = ParseExpression(true, CHECK_OK);
if (expr.IsRawIdentifier()) {
- ASSERT(!expr.AsIdentifier().IsFutureReserved());
- ASSERT(!strict_mode() || !expr.AsIdentifier().IsFutureStrictReserved());
- if (peek() == i::Token::COLON) {
+ if (peek() == i::Token::COLON &&
+ (!strict_mode() || !expr.AsIdentifier().IsFutureReserved())) {
Consume(i::Token::COLON);
- return ParseStatement(ok);
+ i::Scanner::Location start_location = scanner_->peek_location();
+ Statement statement = ParseStatement(CHECK_OK);
+ if (strict_mode() && statement.IsFunctionDeclaration()) {
+ i::Scanner::Location end_location = scanner_->location();
+ ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+ "strict_function", NULL);
+ *ok = false;
+ }
+ return Statement::Default();
}
// Preparsing is disabled for extensions (because the extension details
// aren't passed to lazily compiled functions), so we don't
@@ -524,7 +513,15 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
Expect(i::Token::DEFAULT, CHECK_OK);
Expect(i::Token::COLON, CHECK_OK);
} else {
- ParseStatement(CHECK_OK);
+ i::Scanner::Location start_location = scanner_->peek_location();
+ Statement statement = ParseStatement(CHECK_OK);
+ if (strict_mode() && statement.IsFunctionDeclaration()) {
+ i::Scanner::Location end_location = scanner_->location();
+ ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+ "strict_function", NULL);
+ *ok = false;
+ return Statement::Default();
+ }
}
token = peek();
}
@@ -1437,16 +1434,9 @@ PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
ReportMessageAt(location.beg_pos, location.end_pos,
"reserved_word", NULL);
*ok = false;
- return GetIdentifierSymbol();
}
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
- if (strict_mode()) {
- i::Scanner::Location location = scanner_->location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "strict_reserved_word", NULL);
- *ok = false;
- }
// FALLTHROUGH
+ case i::Token::FUTURE_STRICT_RESERVED_WORD:
case i::Token::IDENTIFIER:
return GetIdentifierSymbol();
default:
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index cb1d5fb4eb..b97b7cff60 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -447,7 +447,7 @@ class PreParser {
stack_overflow_(false),
allow_lazy_(true),
parenthesized_function_(false),
- harmony_scoping_(scanner->HarmonyScoping()) { }
+ harmony_block_scoping_(scanner->HarmonyBlockScoping()) { }
// Preparse the program. Only called in PreParseProgram after creating
// the instance.
@@ -608,7 +608,7 @@ class PreParser {
bool stack_overflow_;
bool allow_lazy_;
bool parenthesized_function_;
- bool harmony_scoping_;
+ bool harmony_block_scoping_;
};
} } // v8::preparser
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 37c76ceefe..663af284b4 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -372,6 +372,13 @@ void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
}
+void PrettyPrinter::VisitCompareToNull(CompareToNull* node) {
+ Print("(");
+ Visit(node->expression());
+ Print("%s null)", Token::String(node->op()));
+}
+
+
void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
Print("<this-function>");
}
@@ -1013,6 +1020,15 @@ void AstPrinter::VisitCompareOperation(CompareOperation* node) {
}
+void AstPrinter::VisitCompareToNull(CompareToNull* node) {
+ const char* name = node->is_strict()
+ ? "COMPARE-TO-NULL-STRICT"
+ : "COMPARE-TO-NULL";
+ IndentedScope indent(this, name, node);
+ Visit(node->expression());
+}
+
+
void AstPrinter::VisitThisFunction(ThisFunction* node) {
IndentedScope indent(this, "THIS-FUNCTION");
}
@@ -1388,6 +1404,16 @@ void JsonAstBuilder::VisitCompareOperation(CompareOperation* expr) {
}
+void JsonAstBuilder::VisitCompareToNull(CompareToNull* expr) {
+ TagScope tag(this, "CompareToNull");
+ {
+ AttributesScope attributes(this);
+ AddAttribute("is_strict", expr->is_strict());
+ }
+ Visit(expr->expression());
+}
+
+
void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) {
TagScope tag(this, "ThisFunction");
}
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index bae35c89ed..adf55ad2e0 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -488,6 +488,8 @@ void CpuProfile::Print() {
CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
+const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
+ CodeMap::CodeEntryInfo(NULL, 0);
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
@@ -1401,12 +1403,10 @@ void HeapObjectsMap::MoveObject(Address from, Address to) {
if (entry != NULL) {
void* value = entry->value;
entries_map_.Remove(from, AddressHash(from));
- if (to != NULL) {
- entry = entries_map_.Lookup(to, AddressHash(to), true);
- // We can have an entry at the new location, it is OK, as GC can overwrite
- // dead objects with alive objects being moved.
- entry->value = value;
- }
+ entry = entries_map_.Lookup(to, AddressHash(to), true);
+ // We can have an entry at the new location, it is OK, as GC can overwrite
+ // dead objects with alive objects being moved.
+ entry->value = value;
}
}
@@ -1528,8 +1528,6 @@ void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) {
- // First perform a full GC in order to avoid dead objects.
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
AssertNoAllocation no_allocation;
HeapObject* object = NULL;
HeapIterator iterator(HeapIterator::kFilterUnreachable);
@@ -1837,13 +1835,12 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
}
-int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
+int V8HeapExplorer::EstimateObjectsCount() {
+ HeapIterator iterator(HeapIterator::kFilterUnreachable);
int objects_count = 0;
- for (HeapObject* obj = iterator->next();
+ for (HeapObject* obj = iterator.next();
obj != NULL;
- obj = iterator->next()) {
- objects_count++;
- }
+ obj = iterator.next(), ++objects_count) {}
return objects_count;
}
@@ -1971,14 +1968,6 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
"descriptors", map->instance_descriptors(),
Map::kInstanceDescriptorsOrBitField3Offset);
}
- if (map->prototype_transitions() != heap_->empty_fixed_array()) {
- TagObject(map->prototype_transitions(), "(prototype transitions)");
- SetInternalReference(obj,
- entry,
- "prototype_transitions",
- map->prototype_transitions(),
- Map::kPrototypeTransitionsOffset);
- }
SetInternalReference(obj, entry,
"code_cache", map->code_cache(),
Map::kCodeCacheOffset);
@@ -2209,11 +2198,9 @@ class RootsReferencesExtractor : public ObjectVisitor {
bool V8HeapExplorer::IterateAndExtractReferences(
SnapshotFillerInterface* filler) {
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
-
filler_ = filler;
+ HeapIterator iterator(HeapIterator::kFilterUnreachable);
bool interrupted = false;
-
// Heap iteration with filtering must be finished in any case.
for (HeapObject* obj = iterator.next();
obj != NULL;
@@ -2779,43 +2766,13 @@ class SnapshotFiller : public SnapshotFillerInterface {
bool HeapSnapshotGenerator::GenerateSnapshot() {
v8_heap_explorer_.TagGlobalObjects();
- // TODO(1562) Profiler assumes that any object that is in the heap after
- // full GC is reachable from the root when computing dominators.
- // This is not true for weakly reachable objects.
- // As a temporary solution we call GC twice.
- Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
-
-#ifdef DEBUG
- Heap* debug_heap = Isolate::Current()->heap();
- ASSERT(!debug_heap->old_data_space()->was_swept_conservatively());
- ASSERT(!debug_heap->old_pointer_space()->was_swept_conservatively());
- ASSERT(!debug_heap->code_space()->was_swept_conservatively());
- ASSERT(!debug_heap->cell_space()->was_swept_conservatively());
- ASSERT(!debug_heap->map_space()->was_swept_conservatively());
-#endif
-
- // The following code uses heap iterators, so we want the heap to be
- // stable. It should follow TagGlobalObjects as that can allocate.
AssertNoAllocation no_alloc;
-#ifdef DEBUG
- debug_heap->Verify();
-#endif
-
SetProgressTotal(4); // 2 passes + dominators + sizes.
-#ifdef DEBUG
- debug_heap->Verify();
-#endif
-
// Pass 1. Iterate heap contents to count entries and references.
if (!CountEntriesAndReferences()) return false;
-#ifdef DEBUG
- debug_heap->Verify();
-#endif
-
// Allocate and fill entries in the snapshot, allocate references.
snapshot_->AllocateEntries(entries_.entries_count(),
entries_.total_children_count(),
@@ -2853,9 +2810,8 @@ bool HeapSnapshotGenerator::ProgressReport(bool force) {
void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
if (control_ == NULL) return;
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
progress_total_ = (
- v8_heap_explorer_.EstimateObjectsCount(&iterator) +
+ v8_heap_explorer_.EstimateObjectsCount() +
dom_explorer_.EstimateObjectsCount()) * iterations_count;
progress_counter_ = 0;
}
@@ -2905,7 +2861,7 @@ void HeapSnapshotGenerator::FillReversePostorderIndexes(
nodes_to_visit.RemoveLast();
}
}
- ASSERT_EQ(current_entry, entries->length());
+ entries->Truncate(current_entry);
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 0eb73bef97..da1fdc33ef 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -257,7 +257,7 @@ class CodeMap {
typedef Address Key;
typedef CodeEntryInfo Value;
static const Key kNoKey;
- static const Value NoValue() { return CodeEntryInfo(NULL, 0); }
+ static const Value kNoValue;
static int Compare(const Key& a, const Key& b) {
return a < b ? -1 : (a > b ? 1 : 0);
}
@@ -550,10 +550,7 @@ class HeapEntry BASE_EMBEDDED {
Vector<HeapGraphEdge*> retainers() {
return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
HeapEntry* dominator() { return dominator_; }
- void set_dominator(HeapEntry* entry) {
- ASSERT(entry != NULL);
- dominator_ = entry;
- }
+ void set_dominator(HeapEntry* entry) { dominator_ = entry; }
void clear_paint() { painted_ = kUnpainted; }
bool painted_reachable() { return painted_ == kPainted; }
@@ -923,7 +920,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
virtual HeapEntry* AllocateEntry(
HeapThing ptr, int children_count, int retainers_count);
void AddRootEntries(SnapshotFillerInterface* filler);
- int EstimateObjectsCount(HeapIterator* iterator);
+ int EstimateObjectsCount();
bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
void TagGlobalObjects();
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index ee2e8c844e..e7d9fc5345 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -115,9 +115,11 @@ class MapTransitionDescriptor: public Descriptor {
class ElementsTransitionDescriptor: public Descriptor {
public:
ElementsTransitionDescriptor(String* key,
- Object* map_or_array)
- : Descriptor(key, map_or_array, PropertyDetails(NONE,
- ELEMENTS_TRANSITION)) { }
+ Map* map,
+ ElementsKind elements_kind)
+ : Descriptor(key, map, PropertyDetails(NONE,
+ ELEMENTS_TRANSITION,
+ elements_kind)) { }
};
// Marks a field name in a map so that adding the field is guaranteed
@@ -200,9 +202,9 @@ class LookupResult BASE_EMBEDDED {
number_ = entry;
}
- void HandlerResult(JSProxy* proxy) {
+ void HandlerResult() {
lookup_type_ = HANDLER_TYPE;
- holder_ = proxy;
+ holder_ = NULL;
details_ = PropertyDetails(NONE, HANDLER);
cacheable_ = false;
}
@@ -219,12 +221,7 @@ class LookupResult BASE_EMBEDDED {
JSObject* holder() {
ASSERT(IsFound());
- return JSObject::cast(holder_);
- }
-
- JSProxy* proxy() {
- ASSERT(IsFound());
- return JSProxy::cast(holder_);
+ return holder_;
}
PropertyType type() {
@@ -357,7 +354,7 @@ class LookupResult BASE_EMBEDDED {
CONSTANT_TYPE
} lookup_type_;
- JSReceiver* holder_;
+ JSObject* holder_;
int number_;
bool cacheable_;
PropertyDetails details_;
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index a51f09ae50..4e44cd4ef3 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -41,20 +41,14 @@ $Proxy.createFunction = function(handler, callTrap, constructTrap) {
throw MakeTypeError("handler_non_object", ["create"])
if (!IS_SPEC_FUNCTION(callTrap))
throw MakeTypeError("trap_function_expected", ["createFunction", "call"])
- var construct
if (IS_UNDEFINED(constructTrap)) {
- construct = DerivedConstructTrap(callTrap)
- } else if (IS_SPEC_FUNCTION(constructTrap)) {
- construct = function() {
- // Make sure the trap receives 'undefined' as this.
- return %Apply(constructTrap, void 0, arguments, 0, %_ArgumentsLength());
- }
- } else {
+ constructTrap = callTrap
+ } else if (!IS_SPEC_FUNCTION(constructTrap)) {
throw MakeTypeError("trap_function_expected",
["createFunction", "construct"])
}
return %CreateJSFunctionProxy(
- handler, callTrap, construct, $Function.prototype)
+ handler, callTrap, constructTrap, $Function.prototype)
}
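A hedged sketch of the old Harmony Proxy.createFunction API implemented here (assumes the harmony proxies flag in a 3.6-era shell; the handler and trap below are hypothetical):

    var handler = {};                                   // empty handler object
    var p = Proxy.createFunction(handler, function () {
      return 42;                                        // call trap
    });
    p();   // 42 - ordinary calls go through the call trap
    // No construct trap was supplied, so after the change above the call
    // trap is simply reused when the proxy is invoked with 'new'.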
@@ -63,17 +57,6 @@ $Proxy.createFunction = function(handler, callTrap, constructTrap) {
// Builtins
////////////////////////////////////////////////////////////////////////////////
-function DerivedConstructTrap(callTrap) {
- return function() {
- var proto = this.prototype
- if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype
- var obj = new $Object()
- obj.__proto__ = proto
- var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength());
- return IS_SPEC_OBJECT(result) ? result : obj
- }
-}
-
function DelegateCallAndConstruct(callTrap, constructTrap) {
return function() {
return %Apply(%_IsConstructCall() ? constructTrap : callTrap,
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index f8432784f2..b32d71dba5 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -37,8 +37,8 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
RegExpMacroAssembler* assembler) :
assembler_(assembler) {
unsigned int type = assembler->Implementation();
- ASSERT(type < 5);
- const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
+ ASSERT(type < 4);
+ const char* impl_names[4] = {"IA32", "ARM", "X64", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index 0ab86f3338..38d4496153 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -95,11 +95,12 @@ function RegExpConstructor(pattern, flags) {
}
}
+
// Deprecated RegExp.prototype.compile method. We behave like the constructor
// were called again. In SpiderMonkey, this method returns the regexp object.
// In JSC, it returns undefined. For compatibility with JSC, we match their
// behavior.
-function RegExpCompile(pattern, flags) {
+function CompileRegExp(pattern, flags) {
// Both JSC and SpiderMonkey treat a missing pattern argument as the
// empty subject string, and an actual undefined value passed as the
// pattern as the string 'undefined'. Note that JSC is inconsistent
@@ -107,11 +108,6 @@ function RegExpCompile(pattern, flags) {
// RegExp.prototype.compile and in the constructor, where they are
// the empty string. For compatibility with JSC, we match their
// behavior.
- if (this == $RegExp.prototype) {
- // We don't allow recompiling RegExp.prototype.
- throw MakeTypeError('incompatible_method_receiver',
- ['RegExp.prototype.compile', this]);
- }
if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
DoConstructRegExp(this, 'undefined', flags);
} else {
@@ -412,6 +408,7 @@ var lastMatchInfoOverride = null;
function SetUpRegExp() {
%CheckIsBootstrapping();
%FunctionSetInstanceClassName($RegExp, 'RegExp');
+ %FunctionSetPrototype($RegExp, new $Object());
%SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
%SetCode($RegExp, RegExpConstructor);
@@ -419,7 +416,7 @@ function SetUpRegExp() {
"exec", RegExpExec,
"test", RegExpTest,
"toString", RegExpToString,
- "compile", RegExpCompile
+ "compile", CompileRegExp
));
// The length of compile is 1 in SpiderMonkey.
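The deprecated method renamed above behaves like invoking the constructor again on an existing regexp, for example:

    var re = /ab/;
    re.compile("cd", "g");   // re-initializes the same object
    re.source;               // "cd"
    re.global;               // true
    re.exec("xcdy")[0];      // "cd"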
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 520dd39890..26d8846107 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -35,7 +35,6 @@
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
-#include "isolate-inl.h"
#include "mark-compact.h"
#include "platform.h"
#include "scopeinfo.h"
@@ -339,8 +338,7 @@ void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
void RuntimeProfiler::RemoveDeadSamples() {
for (int i = 0; i < kSamplerWindowSize; i++) {
Object* function = sampler_window_[i];
- if (function != NULL &&
- !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
+ if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
sampler_window_[i] = NULL;
}
}
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index e0f507e177..3ea93049c0 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -42,7 +42,6 @@
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
-#include "isolate-inl.h"
#include "jsregexp.h"
#include "json-parser.h"
#include "liveedit.h"
@@ -178,7 +177,6 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
// Pixel elements cannot be created using an object literal.
ASSERT(!copy->HasExternalArrayElements());
switch (copy->GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
FixedArray* elements = FixedArray::cast(copy->elements());
if (elements->map() == heap->fixed_cow_array_map()) {
@@ -191,9 +189,6 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
} else {
for (int i = 0; i < elements->length(); i++) {
Object* value = elements->get(i);
- ASSERT(value->IsSmi() ||
- value->IsTheHole() ||
- (copy->GetElementsKind() == FAST_ELEMENTS));
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
{ MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
@@ -422,9 +417,6 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
}
-static const int kSmiOnlyLiteralMinimumLength = 1024;
-
-
static Handle<Object> CreateArrayLiteralBoilerplate(
Isolate* isolate,
Handle<FixedArray> literals,
@@ -434,38 +426,22 @@ static Handle<Object> CreateArrayLiteralBoilerplate(
JSFunction::GlobalContextFromLiterals(*literals)->array_function());
Handle<Object> object = isolate->factory()->NewJSObject(constructor);
- if (elements->length() > kSmiOnlyLiteralMinimumLength) {
- Handle<Map> smi_array_map = isolate->factory()->GetElementsTransitionMap(
- Handle<JSObject>::cast(object),
- FAST_SMI_ONLY_ELEMENTS);
- HeapObject::cast(*object)->set_map(*smi_array_map);
- }
-
const bool is_cow =
(elements->map() == isolate->heap()->fixed_cow_array_map());
Handle<FixedArray> copied_elements =
is_cow ? elements : isolate->factory()->CopyFixedArray(elements);
Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
- bool has_non_smi = false;
if (is_cow) {
+#ifdef DEBUG
// Copy-on-write arrays must be shallow (and simple).
for (int i = 0; i < content->length(); i++) {
- Object* current = content->get(i);
- ASSERT(!current->IsFixedArray());
- if (!current->IsSmi() && !current->IsTheHole()) {
- has_non_smi = true;
- }
- }
-#if DEBUG
- for (int i = 0; i < content->length(); i++) {
ASSERT(!content->get(i)->IsFixedArray());
}
#endif
} else {
for (int i = 0; i < content->length(); i++) {
- Object* current = content->get(i);
- if (current->IsFixedArray()) {
+ if (content->get(i)->IsFixedArray()) {
// The value contains the constant_properties of a
// simple object or array literal.
Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
@@ -473,23 +449,12 @@ static Handle<Object> CreateArrayLiteralBoilerplate(
CreateLiteralBoilerplate(isolate, literals, fa);
if (result.is_null()) return result;
content->set(i, *result);
- has_non_smi = true;
- } else {
- if (!current->IsSmi() && !current->IsTheHole()) {
- has_non_smi = true;
- }
}
}
}
// Set the elements.
- Handle<JSArray> js_object(Handle<JSArray>::cast(object));
- isolate->factory()->SetContent(js_object, content);
-
- if (has_non_smi && js_object->HasFastSmiOnlyElements()) {
- isolate->factory()->EnsureCanContainNonSmiElements(js_object);
- }
-
+ Handle<JSArray>::cast(object)->SetContent(*content);
return object;
}
@@ -720,8 +685,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
- CONVERT_ARG_CHECKED(JSReceiver, key, 1);
- return ObjectHashTable::cast(weakmap->table())->Lookup(*key);
+ // TODO(mstarzinger): Currently we cannot use JSProxy objects as keys
+ // because they cannot be cast to JSObject to get an identity hash code.
+ CONVERT_ARG_CHECKED(JSObject, key, 1);
+ return weakmap->table()->Lookup(*key);
}
@@ -729,9 +696,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
- CONVERT_ARG_CHECKED(JSReceiver, key, 1);
+ // TODO(mstarzinger): See Runtime_WeakMapGet above.
+ CONVERT_ARG_CHECKED(JSObject, key, 1);
Handle<Object> value(args[2]);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+ Handle<ObjectHashTable> table(weakmap->table());
Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
weakmap->set_table(*new_table);
return *value;
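An illustrative use of the two WeakMap runtime entries above (assumes the harmony WeakMap flag in a 3.6-era shell; note that with this change only plain objects, not proxies, are accepted as keys):

    var wm = new WeakMap();
    var key = {};
    wm.set(key, "payload");   // backed by %WeakMapSet
    wm.get(key);              // "payload", backed by %WeakMapGet
    wm.get({});               // undefined - a different object identity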
@@ -1243,17 +1211,46 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
LookupResult lookup;
global->Lookup(*name, &lookup);
if (lookup.IsProperty()) {
- // We found an existing property. Unless it was an interceptor
- // that claims the property is absent, skip this declaration.
- if (lookup.type() != INTERCEPTOR) {
- continue;
- }
+ // Determine if the property is local by comparing the holder
+ // against the global object. The information will be used to
+ // avoid throwing re-declaration errors when declaring
+ // variables or constants that exist in the prototype chain.
+ bool is_local = (*global == lookup.holder());
+ // Get the property attributes and determine if the property is
+ // read-only.
PropertyAttributes attributes = global->GetPropertyAttribute(*name);
- if (attributes != ABSENT) {
+ bool is_read_only = (attributes & READ_ONLY) != 0;
+ if (lookup.type() == INTERCEPTOR) {
+ // If the interceptor says the property is there, we
+ // just return undefined without overwriting the property.
+ // Otherwise, we continue to setting the property.
+ if (attributes != ABSENT) {
+ // Check if the existing property conflicts with regards to const.
+ if (is_local && (is_read_only || is_const_property)) {
+ const char* type = (is_read_only) ? "const" : "var";
+ return ThrowRedeclarationError(isolate, type, name);
+ };
+ // The property already exists without conflicting: Go to
+ // the next declaration.
+ continue;
+ }
+ // Fall-through and introduce the absent property by using
+ // SetProperty.
+ } else {
+ // For const properties, we treat a callback with this name
+ // even in the prototype as a conflicting declaration.
+ if (is_const_property && (lookup.type() == CALLBACKS)) {
+ return ThrowRedeclarationError(isolate, "const", name);
+ }
+ // Otherwise, we check for locally conflicting declarations.
+ if (is_local && (is_read_only || is_const_property)) {
+ const char* type = (is_read_only) ? "const" : "var";
+ return ThrowRedeclarationError(isolate, type, name);
+ }
+ // The property already exists without conflicting: Go to
+ // the next declaration.
continue;
}
- // Fall-through and introduce the absent property by using
- // SetProperty.
}
} else {
is_function_declaration = true;
@@ -1270,6 +1267,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
LookupResult lookup;
global->LocalLookup(*name, &lookup);
+ // There's a local property that we need to overwrite because
+ // we're either declaring a function or there's an interceptor
+ // that claims the property is absent.
+ //
+ // Check for conflicting re-declarations. We cannot have
+ // conflicting types in case of intercepted properties because
+ // they are absent.
+ if (lookup.IsProperty() &&
+ (lookup.type() != INTERCEPTOR) &&
+ (lookup.IsReadOnly() || is_const_property)) {
+ const char* type = (lookup.IsReadOnly()) ? "const" : "var";
+ return ThrowRedeclarationError(isolate, type, name);
+ }
+
// Compute the property attributes. According to ECMA-262, section
// 13, page 71, the property must be read-only and
// non-deletable. However, neither SpiderMonkey nor KJS creates the
@@ -1324,17 +1335,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
- // Declarations are always made in a function or global context. In the
- // case of eval code, the context passed is the context of the caller,
- // which may be some nested context and not the declaration context.
- RUNTIME_ASSERT(args[0]->IsContext());
- Handle<Context> context(Context::cast(args[0])->declaration_context());
-
+ CONVERT_ARG_CHECKED(Context, context, 0);
Handle<String> name(String::cast(args[1]));
PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2));
RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
Handle<Object> initial_value(args[3], isolate);
+ // Declarations are always done in a function or global context.
+ context = Handle<Context>(context->declaration_context());
+
int index;
PropertyAttributes attributes;
ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
@@ -1343,7 +1352,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
context->Lookup(name, flags, &index, &attributes, &binding_flags);
if (attributes != ABSENT) {
- // The name was declared before; check for conflicting re-declarations.
+ // The name was declared before; check for conflicting
+ // re-declarations: This is similar to the code in parser.cc in
+ // the AstBuildingParser::Declare function.
if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
// Functions are not read-only.
ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
@@ -1354,41 +1365,53 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
// Initialize it if necessary.
if (*initial_value != NULL) {
if (index >= 0) {
- ASSERT(holder.is_identical_to(context));
- if (((attributes & READ_ONLY) == 0) ||
- context->get(index)->IsTheHole()) {
- context->set(index, *initial_value);
+ // The variable or constant context slot should always be in
+ // the function context or the arguments object.
+ if (holder->IsContext()) {
+ ASSERT(holder.is_identical_to(context));
+ if (((attributes & READ_ONLY) == 0) ||
+ context->get(index)->IsTheHole()) {
+ context->set(index, *initial_value);
+ }
+ } else {
+ // The holder is an arguments object.
+ Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
+ Handle<Object> result = SetElement(arguments, index, initial_value,
+ kNonStrictMode);
+ if (result.is_null()) return Failure::Exception();
}
} else {
- // Slow case: The property is in the context extension object of a
- // function context or the global object of a global context.
- Handle<JSObject> object = Handle<JSObject>::cast(holder);
+ // Slow case: The property is not in the FixedArray part of the context.
+ Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
RETURN_IF_EMPTY_HANDLE(
isolate,
- SetProperty(object, name, initial_value, mode, kNonStrictMode));
+ SetProperty(context_ext, name, initial_value,
+ mode, kNonStrictMode));
}
}
} else {
// The property is not in the function context. It needs to be
- // "declared" in the function context's extension context or as a
- // property of the the global object.
- Handle<JSObject> object;
+ // "declared" in the function context's extension context, or in the
+ // global context.
+ Handle<JSObject> context_ext;
if (context->has_extension()) {
- object = Handle<JSObject>(JSObject::cast(context->extension()));
+ // The function context's extension context exists - use it.
+ context_ext = Handle<JSObject>(JSObject::cast(context->extension()));
} else {
- // Context extension objects are allocated lazily.
- ASSERT(context->IsFunctionContext());
- object = isolate->factory()->NewJSObject(
+ // The function context's extension context does not exist - allocate
+ // The function context's extension context does not exist - allocate
+ // it.
+ context_ext = isolate->factory()->NewJSObject(
isolate->context_extension_function());
- context->set_extension(*object);
+ // And store it in the extension slot.
+ context->set_extension(*context_ext);
}
- ASSERT(*object != NULL);
+ ASSERT(*context_ext != NULL);
// Declare the property by setting it to the initial value if provided,
// or undefined, and use the correct mode (e.g. READ_ONLY attribute for
// constant declarations).
- ASSERT(!object->HasLocalProperty(*name));
+ ASSERT(!context_ext->HasLocalProperty(*name));
Handle<Object> value(isolate->heap()->undefined_value(), isolate);
if (*initial_value != NULL) value = initial_value;
// Declaring a const context slot is a conflicting declaration if
@@ -1398,15 +1421,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
// SetProperty and no setters are invoked for those since they are
// not real JSObjects.
if (initial_value->IsTheHole() &&
- !object->IsJSContextExtensionObject()) {
+ !context_ext->IsJSContextExtensionObject()) {
LookupResult lookup;
- object->Lookup(*name, &lookup);
+ context_ext->Lookup(*name, &lookup);
if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
return ThrowRedeclarationError(isolate, "const", name);
}
}
RETURN_IF_EMPTY_HANDLE(isolate,
- SetProperty(object, name, value, mode,
+ SetProperty(context_ext, name, value, mode,
kNonStrictMode));
}
@@ -1442,32 +1465,64 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
// to assign to the property.
// Note that objects can have hidden prototypes, so we need to traverse
// the whole chain of hidden prototypes to do a 'local' lookup.
- Object* object = global;
+ JSObject* real_holder = global;
LookupResult lookup;
- while (object->IsJSObject() &&
- JSObject::cast(object)->map()->is_hidden_prototype()) {
- JSObject* raw_holder = JSObject::cast(object);
- raw_holder->LocalLookup(*name, &lookup);
- if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
- HandleScope handle_scope(isolate);
- Handle<JSObject> holder(raw_holder);
- PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
- // Update the raw pointer in case it's changed due to GC.
- raw_holder = *holder;
- if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
- // Found an interceptor that's not read only.
- if (assign) {
- return raw_holder->SetProperty(
- &lookup, *name, args[2], attributes, strict_mode);
- } else {
- return isolate->heap()->undefined_value();
+ while (true) {
+ real_holder->LocalLookup(*name, &lookup);
+ if (lookup.IsProperty()) {
+ // Determine if this is a redeclaration of something read-only.
+ if (lookup.IsReadOnly()) {
+ // If we found readonly property on one of hidden prototypes,
+ // just shadow it.
+ if (real_holder != isolate->context()->global()) break;
+ return ThrowRedeclarationError(isolate, "const", name);
+ }
+
+ // Determine if this is a redeclaration of an intercepted read-only
+ // property and figure out if the property exists at all.
+ bool found = true;
+ PropertyType type = lookup.type();
+ if (type == INTERCEPTOR) {
+ HandleScope handle_scope(isolate);
+ Handle<JSObject> holder(real_holder);
+ PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
+ real_holder = *holder;
+ if (intercepted == ABSENT) {
+ // The interceptor claims the property isn't there. We need to
+ // make sure to introduce it.
+ found = false;
+ } else if ((intercepted & READ_ONLY) != 0) {
+ // The property is present, but read-only. Since we're trying to
+ // overwrite it with a variable declaration we must throw a
+ // re-declaration error. However if we found readonly property
+ // on one of hidden prototypes, just shadow it.
+ if (real_holder != isolate->context()->global()) break;
+ return ThrowRedeclarationError(isolate, "const", name);
}
}
+
+ if (found && !assign) {
+ // The global property is there and we're not assigning any value
+ // to it. Just return.
+ return isolate->heap()->undefined_value();
+ }
+
+ // Assign the value (or undefined) to the property.
+ Object* value = (assign) ? args[2] : isolate->heap()->undefined_value();
+ return real_holder->SetProperty(
+ &lookup, *name, value, attributes, strict_mode);
}
- object = raw_holder->GetPrototype();
+
+ Object* proto = real_holder->GetPrototype();
+ if (!proto->IsJSObject())
+ break;
+
+ if (!JSObject::cast(proto)->map()->is_hidden_prototype())
+ break;
+
+ real_holder = JSObject::cast(proto);
}
- // Reload global in case the loop above performed a GC.
global = isolate->context()->global();
if (assign) {
return global->SetProperty(*name, args[2], attributes, strict_mode);
@@ -1505,9 +1560,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
attributes);
}
+ // Determine if this is a redeclaration of something not
+ // read-only. In case the result is hidden behind an interceptor we
+ // need to ask it for the property attributes.
if (!lookup.IsReadOnly()) {
+ if (lookup.type() != INTERCEPTOR) {
+ return ThrowRedeclarationError(isolate, "var", name);
+ }
+
+ PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
+
+ // Throw re-declaration error if the intercepted property is present
+ // but not read-only.
+ if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
+ return ThrowRedeclarationError(isolate, "var", name);
+ }
+
// Restore global object from context (in case of GC) and continue
- // with setting the value.
+ // with setting the value because the property is either absent or
+ // read-only. We also have to redo the lookup.
HandleScope handle_scope(isolate);
Handle<GlobalObject> global(isolate->context()->global());
@@ -1524,20 +1595,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
return *value;
}
- // Set the value, but only if we're assigning the initial value to a
+ // Set the value, but only if we're assigning the initial value to a
// constant. For now, we determine this by checking if the
// current value is the hole.
- // Strict mode handling not needed (const is disallowed in strict mode).
+ // Strict mode handling not needed (const disallowed in strict mode).
PropertyType type = lookup.type();
if (type == FIELD) {
FixedArray* properties = global->properties();
int index = lookup.GetFieldIndex();
- if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
+ if (properties->get(index)->IsTheHole()) {
properties->set(index, *value);
}
} else if (type == NORMAL) {
- if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
- !lookup.IsReadOnly()) {
+ if (global->GetNormalizedProperty(&lookup)->IsTheHole()) {
global->SetNormalizedProperty(&lookup, *value);
}
} else {
@@ -1557,12 +1627,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
Handle<Object> value(args[0], isolate);
ASSERT(!value->IsTheHole());
+ CONVERT_ARG_CHECKED(Context, context, 1);
+ Handle<String> name(String::cast(args[2]));
// Initializations are always done in a function or global context.
- RUNTIME_ASSERT(args[1]->IsContext());
- Handle<Context> context(Context::cast(args[1])->declaration_context());
-
- Handle<String> name(String::cast(args[2]));
+ context = Handle<Context>(context->declaration_context());
int index;
PropertyAttributes attributes;
@@ -1571,19 +1640,39 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
Handle<Object> holder =
context->Lookup(name, flags, &index, &attributes, &binding_flags);
+ // In most situations, the property introduced by the const
+ // declaration should be present in the context extension object.
+ // However, because declaration and initialization are separate, the
+ // property might have been deleted (if it was introduced by eval)
+ // before we reach the initialization point.
+ //
+ // Example:
+ //
+ // function f() { eval("delete x; const x;"); }
+ //
+ // In that case, the initialization behaves like a normal assignment
+ // to property 'x'.
if (index >= 0) {
- ASSERT(holder->IsContext());
- // Property was found in a context. Perform the assignment if we
- // found some non-constant or an uninitialized constant.
- Handle<Context> context = Handle<Context>::cast(holder);
- if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
- context->set(index, *value);
+ if (holder->IsContext()) {
+ // Property was found in a context. Perform the assignment if we
+ // found some non-constant or an uninitialized constant.
+ Handle<Context> context = Handle<Context>::cast(holder);
+ if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
+ context->set(index, *value);
+ }
+ } else {
+ // The holder is an arguments object.
+ ASSERT((attributes & READ_ONLY) == 0);
+ Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
+ RETURN_IF_EMPTY_HANDLE(
+ isolate,
+ SetElement(arguments, index, value, kNonStrictMode));
}
return *value;
}
- // The property could not be found, we introduce it as a property of the
- // global object.
+ // The property could not be found, we introduce it in the global
+ // context.
if (attributes == ABSENT) {
Handle<JSObject> global = Handle<JSObject>(
isolate->context()->global());
@@ -1594,41 +1683,29 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
return *value;
}
- // The property was present in some function's context extension object,
- // as a property on the subject of a with, or as a property of the global
- // object.
- //
- // In most situations, eval-introduced consts should still be present in
- // the context extension object. However, because declaration and
- // initialization are separate, the property might have been deleted
- // before we reach the initialization point.
- //
- // Example:
- //
- // function f() { eval("delete x; const x;"); }
- //
- // In that case, the initialization behaves like a normal assignment.
- Handle<JSObject> object = Handle<JSObject>::cast(holder);
+ // The property was present in a context extension object.
+ Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
- if (*object == context->extension()) {
- // This is the property that was introduced by the const declaration.
- // Set it if it hasn't been set before. NOTE: We cannot use
- // GetProperty() to get the current value as it 'unholes' the value.
+ if (*context_ext == context->extension()) {
+ // This is the property that was introduced by the const
+ // declaration. Set it if it hasn't been set before. NOTE: We
+ // cannot use GetProperty() to get the current value as it
+ // 'unholes' the value.
LookupResult lookup;
- object->LocalLookupRealNamedProperty(*name, &lookup);
+ context_ext->LocalLookupRealNamedProperty(*name, &lookup);
ASSERT(lookup.IsProperty()); // the property was declared
ASSERT(lookup.IsReadOnly()); // and it was declared as read-only
PropertyType type = lookup.type();
if (type == FIELD) {
- FixedArray* properties = object->properties();
+ FixedArray* properties = context_ext->properties();
int index = lookup.GetFieldIndex();
if (properties->get(index)->IsTheHole()) {
properties->set(index, *value);
}
} else if (type == NORMAL) {
- if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
- object->SetNormalizedProperty(&lookup, *value);
+ if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) {
+ context_ext->SetNormalizedProperty(&lookup, *value);
}
} else {
// We should not reach here. Any real, named property should be
@@ -1636,13 +1713,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
UNREACHABLE();
}
} else {
- // The property was found on some other object. Set it if it is not a
- // read-only property.
+ // The property was found in a different context extension object.
+ // Set it if it is not a read-only property.
if ((attributes & READ_ONLY) == 0) {
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
isolate,
- SetProperty(object, name, value, attributes, kNonStrictMode));
+ SetProperty(context_ext, name, value, attributes, kNonStrictMode));
}
}
@@ -1663,19 +1740,6 @@ RUNTIME_FUNCTION(MaybeObject*,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NonSmiElementStored) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- if (object->HasFastSmiOnlyElements()) {
- MaybeObject* maybe_map = object->GetElementsTransitionMap(FAST_ELEMENTS);
- Map* map;
- if (!maybe_map->To<Map>(&map)) return maybe_map;
- object->set_map(Map::cast(map));
- }
- return *object;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
@@ -1761,7 +1825,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, multiline);
regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
Smi::FromInt(0),
- SKIP_WRITE_BARRIER); // It's a Smi.
+ SKIP_WRITE_BARRIER);
return regexp;
}
@@ -2175,7 +2239,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
literals->set(JSFunction::kLiteralGlobalContextIndex,
context->global_context());
}
- target->set_literals(*literals);
+ // It's okay to skip the write barrier here because the literals
+ // are guaranteed to be in old space.
+ target->set_literals(*literals, SKIP_WRITE_BARRIER);
target->set_next_function_link(isolate->heap()->undefined_value());
if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) {
@@ -2259,8 +2325,7 @@ class FixedArrayBuilder {
public:
explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
: array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
- length_(0),
- has_non_smi_elements_(false) {
+ length_(0) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
ASSERT(initial_capacity > 0);
@@ -2268,8 +2333,7 @@ class FixedArrayBuilder {
explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
: array_(backing_store),
- length_(0),
- has_non_smi_elements_(false) {
+ length_(0) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
ASSERT(backing_store->length() > 0);
@@ -2297,15 +2361,12 @@ class FixedArrayBuilder {
}
void Add(Object* value) {
- ASSERT(!value->IsSmi());
ASSERT(length_ < capacity());
array_->set(length_, value);
length_++;
- has_non_smi_elements_ = true;
}
void Add(Smi* value) {
- ASSERT(value->IsSmi());
ASSERT(length_ < capacity());
array_->set(length_, value);
length_++;
@@ -2330,7 +2391,7 @@ class FixedArrayBuilder {
}
Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
- FACTORY->SetContent(target_array, array_);
+ target_array->set_elements(*array_);
target_array->set_length(Smi::FromInt(length_));
return target_array;
}
@@ -2338,7 +2399,6 @@ class FixedArrayBuilder {
private:
Handle<FixedArray> array_;
int length_;
- bool has_non_smi_elements_;
};
@@ -2833,7 +2893,7 @@ void FindStringIndicesDispatch(Isolate* isolate,
}
} else {
Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
- if (pattern_content.IsAscii()) {
+ if (pattern->IsAsciiRepresentation()) {
FindStringIndices(isolate,
subject_vector,
pattern_content.ToAsciiVector(),
@@ -2959,7 +3019,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
// Shortcut for simple non-regexp global replacements
if (is_global &&
- regexp_handle->TypeTag() == JSRegExp::ATOM &&
+ regexp->TypeTag() == JSRegExp::ATOM &&
compiled_replacement.simple_hint()) {
if (subject_handle->HasOnlyAsciiChars() &&
replacement_handle->HasOnlyAsciiChars()) {
@@ -3182,9 +3242,6 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
Address end_of_string = answer->address() + string_size;
isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
- MemoryChunk::IncrementLiveBytes(answer->address(), -delta);
- }
return *answer;
}
@@ -3944,13 +4001,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
// Slow case.
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return *isolate->factory()->infinity_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
char* str = DoubleToRadixCString(value, radix);
MaybeObject* result =
@@ -3966,13 +4023,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return *isolate->factory()->infinity_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
@@ -3991,13 +4048,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return *isolate->factory()->infinity_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
@@ -4016,13 +4073,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
if (isinf(value)) {
if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
}
- return *isolate->factory()->infinity_symbol();
+ return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
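The JavaScript-visible behaviour behind the four Number formatting runtimes above stays the same; the downgrade only switches the special values back to freshly allocated ASCII strings. Illustrative calls:

    (0 / 0).toFixed(2);          // "NaN"
    (1 / 0).toPrecision(3);      // "Infinity"
    (-1 / 0).toExponential(1);   // "-Infinity"
    (255.5).toString(16);        // "ff.8" - non-integer values take the slow radix path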
@@ -4212,7 +4269,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
CONVERT_CHECKED(String, name, args[1]);
CONVERT_CHECKED(Smi, flag_setter, args[2]);
Object* fun = args[3];
- RUNTIME_ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
+ RUNTIME_ASSERT(fun->IsJSFunction() || fun->IsUndefined());
CONVERT_CHECKED(Smi, flag_attr, args[4]);
int unchecked = flag_attr->value();
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
@@ -4380,14 +4437,6 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
return isolate->Throw(*error);
}
- if (object->IsJSProxy()) {
- bool has_pending_exception = false;
- Handle<Object> name = Execution::ToString(key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- return JSProxy::cast(*object)->SetProperty(
- String::cast(*name), *value, attr, strict_mode);
- }
-
// If the object isn't a JavaScript object, we ignore the store.
if (!object->IsJSObject()) return *value;
@@ -4507,7 +4556,7 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
// Check if the given key is an array index.
uint32_t index;
- if (key->ToArrayIndex(&index)) {
+ if (receiver->IsJSObject() && key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the
// characters of a string using [] notation. In the case of a
// String object we just need to redirect the deletion to the
@@ -4518,7 +4567,8 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
return isolate->heap()->true_value();
}
- return receiver->DeleteElement(index, JSReceiver::FORCE_DELETION);
+ return JSObject::cast(*receiver)->DeleteElement(
+ index, JSReceiver::FORCE_DELETION);
}
Handle<String> key_string;
@@ -4680,24 +4730,29 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSReceiver, receiver, args[0]);
- CONVERT_CHECKED(String, key, args[1]);
- bool result = receiver->HasProperty(key);
- if (isolate->has_pending_exception()) return Failure::Exception();
- return isolate->heap()->ToBoolean(result);
+ // Only JS receivers can have properties.
+ if (args[0]->IsJSReceiver()) {
+ JSReceiver* receiver = JSReceiver::cast(args[0]);
+ CONVERT_CHECKED(String, key, args[1]);
+ if (receiver->HasProperty(key)) return isolate->heap()->true_value();
+ }
+ return isolate->heap()->false_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSReceiver, receiver, args[0]);
- CONVERT_CHECKED(Smi, index, args[1]);
- bool result = receiver->HasElement(index->value());
- if (isolate->has_pending_exception()) return Failure::Exception();
- return isolate->heap()->ToBoolean(result);
+ // Only JS objects can have elements.
+ if (args[0]->IsJSObject()) {
+ JSObject* object = JSObject::cast(args[0]);
+ CONVERT_CHECKED(Smi, index_obj, args[1]);
+ uint32_t index = index_obj->value();
+ if (object->HasElement(index)) return isolate->heap()->true_value();
+ }
+ return isolate->heap()->false_value();
}
@@ -4710,37 +4765,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
uint32_t index;
if (key->AsArrayIndex(&index)) {
- JSObject::LocalElementType type = object->HasLocalElement(index);
- switch (type) {
- case JSObject::UNDEFINED_ELEMENT:
- case JSObject::STRING_CHARACTER_ELEMENT:
- return isolate->heap()->false_value();
- case JSObject::INTERCEPTED_ELEMENT:
- case JSObject::FAST_ELEMENT:
- return isolate->heap()->true_value();
- case JSObject::DICTIONARY_ELEMENT: {
- if (object->IsJSGlobalProxy()) {
- Object* proto = object->GetPrototype();
- if (proto->IsNull()) {
- return isolate->heap()->false_value();
- }
- ASSERT(proto->IsJSGlobalObject());
- object = JSObject::cast(proto);
- }
- FixedArray* elements = FixedArray::cast(object->elements());
- NumberDictionary* dictionary = NULL;
- if (elements->map() ==
- isolate->heap()->non_strict_arguments_elements_map()) {
- dictionary = NumberDictionary::cast(elements->get(1));
- } else {
- dictionary = NumberDictionary::cast(elements);
- }
- int entry = dictionary->FindEntry(index);
- ASSERT(entry != NumberDictionary::kNotFound);
- PropertyDetails details = dictionary->DetailsAt(entry);
- return isolate->heap()->ToBoolean(!details.IsDontEnum());
- }
- }
+ return isolate->heap()->ToBoolean(object->HasElement(index));
}
PropertyAttributes att = object->GetLocalPropertyAttribute(key);
@@ -5554,7 +5579,7 @@ static MaybeObject* SlowQuoteJsonString(Isolate* isolate,
StringType* new_string = StringType::cast(new_object);
Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize);
+ new_string->address() + SeqAsciiString::kHeaderSize);
if (comma) *(write_cursor++) = ',';
*(write_cursor++) = '"';
@@ -5642,15 +5667,16 @@ static MaybeObject* QuoteJsonString(Isolate* isolate,
StringType* new_string = StringType::cast(new_object);
ASSERT(isolate->heap()->new_space()->Contains(new_string));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize);
+ new_string->address() + SeqAsciiString::kHeaderSize);
if (comma) *(write_cursor++) = ',';
write_cursor = WriteQuoteJsonString<Char, Char>(isolate,
write_cursor,
characters);
int final_length = static_cast<int>(
write_cursor - reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize));
+ new_string->address() + SeqAsciiString::kHeaderSize));
isolate->heap()->new_space()->
template ShrinkStringAtAllocationBoundary<StringType>(
new_string, final_length);
@@ -5728,8 +5754,9 @@ static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
StringType* new_string = StringType::cast(new_object);
ASSERT(isolate->heap()->new_space()->Contains(new_string));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
Char* write_cursor = reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize);
+ new_string->address() + SeqAsciiString::kHeaderSize);
*(write_cursor++) = '[';
for (int i = 0; i < length; i++) {
if (i != 0) *(write_cursor++) = ',';
@@ -5750,7 +5777,7 @@ static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
int final_length = static_cast<int>(
write_cursor - reinterpret_cast<Char*>(
- new_string->address() + SeqString::kHeaderSize));
+ new_string->address() + SeqAsciiString::kHeaderSize));
isolate->heap()->new_space()->
template ShrinkStringAtAllocationBoundary<StringType>(
new_string, final_length);
@@ -6119,7 +6146,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
- return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
+ return unibrow::WhiteSpace::Is(c) || c == 0x200b;
}
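A sketch of the observable difference the IsTrimWhiteSpace change makes for String.prototype.trim in this build:

    "\u200b x \u200b".trim();   // "x"       - zero-width space is still trimmable
    "\ufeffx".trim();           // "\ufeffx" - the BOM is no longer treated as whitespace here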
@@ -6202,8 +6229,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
int part_count = indices.length();
Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
- MaybeObject* maybe_result = result->EnsureCanContainNonSmiElements();
- if (maybe_result->IsFailure()) return maybe_result;
result->set_length(Smi::FromInt(part_count));
ASSERT(result->HasFastElements());
@@ -6250,11 +6275,11 @@ static int CopyCachedAsciiCharsToArray(Heap* heap,
FixedArray* ascii_cache = heap->single_character_string_cache();
Object* undefined = heap->undefined_value();
int i;
- WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (i = 0; i < length; ++i) {
Object* value = ascii_cache->get(chars[i]);
if (value == undefined) break;
- elements->set(i, value, mode);
+ ASSERT(!heap->InNewSpace(value));
+ elements->set(i, value, SKIP_WRITE_BARRIER);
}
if (i < length) {
ASSERT(Smi::FromInt(0) == 0);
@@ -6578,9 +6603,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
- MaybeObject* maybe_result = array->EnsureCanContainNonSmiElements();
- if (maybe_result->IsFailure()) return maybe_result;
-
int special_length = special->length();
if (!array->HasFastElements()) {
return isolate->Throw(isolate->heap()->illegal_argument_symbol());
@@ -6808,8 +6830,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_CHECKED(JSArray, elements_array, args[0]);
- RUNTIME_ASSERT(elements_array->HasFastElements() ||
- elements_array->HasFastSmiOnlyElements());
+ RUNTIME_ASSERT(elements_array->HasFastElements());
CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
CONVERT_CHECKED(String, separator, args[2]);
// elements_array is fast-mode JSarray of alternating positions
@@ -7926,9 +7947,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
}
-static SmartArrayPointer<Handle<Object> > GetNonBoundArguments(
- int bound_argc,
- int* total_argc) {
+static SmartArrayPointer<Object**> GetNonBoundArguments(int bound_argc,
+ int* total_argc) {
// Find frame containing arguments passed to the caller.
JavaScriptFrameIterator it;
JavaScriptFrame* frame = it.frame();
@@ -7944,11 +7964,10 @@ static SmartArrayPointer<Handle<Object> > GetNonBoundArguments(
&args_slots);
*total_argc = bound_argc + args_count;
- SmartArrayPointer<Handle<Object> > param_data(
- NewArray<Handle<Object> >(*total_argc));
+ SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
for (int i = 0; i < args_count; i++) {
Handle<Object> val = args_slots[i].GetValue();
- param_data[bound_argc + i] = val;
+ param_data[bound_argc + i] = val.location();
}
return param_data;
} else {
@@ -7957,11 +7976,10 @@ static SmartArrayPointer<Handle<Object> > GetNonBoundArguments(
int args_count = frame->ComputeParametersCount();
*total_argc = bound_argc + args_count;
- SmartArrayPointer<Handle<Object> > param_data(
- NewArray<Handle<Object> >(*total_argc));
+ SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
for (int i = 0; i < args_count; i++) {
Handle<Object> val = Handle<Object>(frame->GetParameter(i));
- param_data[bound_argc + i] = val;
+ param_data[bound_argc + i] = val.location();
}
return param_data;
}
@@ -7979,17 +7997,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
int bound_argc = 0;
if (!args[1]->IsNull()) {
CONVERT_ARG_CHECKED(JSArray, params, 1);
- RUNTIME_ASSERT(params->HasFastTypeElements());
+ RUNTIME_ASSERT(params->HasFastElements());
bound_args = Handle<FixedArray>(FixedArray::cast(params->elements()));
bound_argc = Smi::cast(params->length())->value();
}
int total_argc = 0;
- SmartArrayPointer<Handle<Object> > param_data =
+ SmartArrayPointer<Object**> param_data =
GetNonBoundArguments(bound_argc, &total_argc);
for (int i = 0; i < bound_argc; i++) {
Handle<Object> val = Handle<Object>(bound_args->get(i));
- param_data[i] = val;
+ param_data[i] = val.location();
}
bool exception = false;
@@ -8178,31 +8196,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
}
-class ActivationsFinder : public ThreadVisitor {
- public:
- explicit ActivationsFinder(JSFunction* function)
- : function_(function), has_activations_(false) {}
-
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- if (has_activations_) return;
-
- for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized() && frame->function() == function_) {
- has_activations_ = true;
- return;
- }
- }
- }
-
- bool has_activations() { return has_activations_; }
-
- private:
- JSFunction* function_;
- bool has_activations_;
-};
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8249,24 +8242,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
return isolate->heap()->undefined_value();
}
- // Find other optimized activations of the function.
- bool has_other_activations = false;
+ // Count the number of optimized activations of the function.
+ int activations = 0;
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
if (frame->is_optimized() && frame->function() == *function) {
- has_other_activations = true;
- break;
+ activations++;
}
it.Advance();
}
- if (!has_other_activations) {
- ActivationsFinder activations_finder(*function);
- isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
- has_other_activations = activations_finder.has_activations();
- }
-
- if (!has_other_activations) {
+ if (activations == 0) {
if (FLAG_trace_deopt) {
PrintF("[removing optimized code for: ");
function->PrintName();
@@ -8321,8 +8307,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
- // The least significant bit (after untagging) indicates whether the
- // function is currently optimized, regardless of reason.
if (!V8::UseCrankshaft()) {
return Smi::FromInt(4); // 4 == "never".
}
@@ -8495,11 +8479,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
argv[i] = Handle<Object>(object);
}
- bool threw;
+ bool threw = false;
Handle<JSReceiver> hfun(fun);
Handle<Object> hreceiver(receiver);
- Handle<Object> result =
- Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
+ Handle<Object> result = Execution::Call(
+ hfun, hreceiver, argc, reinterpret_cast<Object***>(argv), &threw, true);
if (threw) return Failure::Exception();
return *result;
@@ -8662,10 +8646,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
}
// The slot was found in a JSObject, either a context extension object,
- // the global object, or the subject of a with. Try to delete it
- // (respecting DONT_DELETE).
+ // the global object, or an arguments object. Try to delete it
+ // (respecting DONT_DELETE). For consistency with V8's usual behavior,
+ // which allows deleting all parameters in functions that mention
+ // 'arguments', we do this even for the case of slots found on an
+ // arguments object. The slot was found on an arguments object if the
+ // index is non-negative.
Handle<JSObject> object = Handle<JSObject>::cast(holder);
- return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
+ if (index >= 0) {
+ return object->DeleteElement(index, JSReceiver::NORMAL_DELETION);
+ } else {
+ return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
+ }
}
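
The rewritten comment above concerns 'delete' on a name that resolves to an indexed slot of an arguments object rather than to a named property. A rough sloppy-mode sketch of that case (hypothetical function; the exact result depends on how the parameter was allocated):

    function f(a) {
      arguments;               // mentioning 'arguments' keeps parameters on the arguments object
      return eval("delete a"); // the name can resolve to an indexed slot on that object, which
                               // this code now removes via DeleteElement instead of DeleteProperty
    }
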
@@ -8750,19 +8742,24 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
&attributes,
&binding_flags);
- // If the index is non-negative, the slot has been found in a context.
+ // If the index is non-negative, the slot has been found in a local
+ // variable or a parameter. Read it from the context object or the
+ // arguments object.
if (index >= 0) {
- ASSERT(holder->IsContext());
- // If the "property" we were looking for is a local variable, the
- // receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
+ // If the "property" we were looking for is a local variable or an
+ // argument in a context, the receiver is the global object; see
+ // ECMA-262, 3rd., 10.1.6 and 10.2.3.
//
- // Use the hole as the receiver to signal that the receiver is implicit
- // and that the global receiver should be used (as distinguished from an
- // explicit receiver that happens to be a global object).
+ // Use the hole as the receiver to signal that the receiver is
+ // implicit and that the global receiver should be used.
Handle<Object> receiver = isolate->factory()->the_hole_value();
- Object* value = Context::cast(*holder)->get(index);
+ MaybeObject* value = (holder->IsContext())
+ ? Context::cast(*holder)->get(index)
+ : JSObject::cast(*holder)->GetElement(index);
// Check for uninitialized bindings.
- if (binding_flags == MUTABLE_CHECK_INITIALIZED && value->IsTheHole()) {
+ if (holder->IsContext() &&
+ binding_flags == MUTABLE_CHECK_INITIALIZED &&
+ value->IsTheHole()) {
Handle<Object> reference_error =
isolate->factory()->NewReferenceError("not_defined",
HandleVector(&name, 1));
@@ -8772,18 +8769,25 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
}
}
- // Otherwise, if the slot was found the holder is a context extension
- // object, subject of a with, or a global object. We read the named
- // property from it.
- if (!holder.is_null()) {
- Handle<JSObject> object = Handle<JSObject>::cast(holder);
- ASSERT(object->HasProperty(*name));
+ // If the holder is found, we read the property from it.
+ if (!holder.is_null() && holder->IsJSObject()) {
+ ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name));
+ JSObject* object = JSObject::cast(*holder);
+ Object* receiver;
+ if (object->IsGlobalObject()) {
+ receiver = GlobalObject::cast(object)->global_receiver();
+ } else if (context->is_exception_holder(*holder)) {
+ // Use the hole as the receiver to signal that the receiver is
+ // implicit and that the global receiver should be used.
+ receiver = isolate->heap()->the_hole_value();
+ } else {
+ receiver = ComputeReceiverForNonGlobal(isolate, object);
+ }
+
// GetProperty below can cause GC.
- Handle<Object> receiver_handle(object->IsGlobalObject()
- ? GlobalObject::cast(*object)->global_receiver()
- : ComputeReceiverForNonGlobal(isolate, *object));
+ Handle<Object> receiver_handle(receiver);
- // No need to unhole the value here. This is taken care of by the
+ // No need to unhole the value here. This is taken care of by the
// GetProperty function.
MaybeObject* value = object->GetProperty(*name);
return MakePair(value, *receiver_handle);
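
The receiver selection above mirrors what is observable from JavaScript: a function found on a 'with' object gets that object as 'this', while a function found through the ordinary scope chain gets the implicit global receiver. A small sloppy-mode sketch (illustrative names):

    var o = { m: function () { return this; } };
    function g() { return this; }
    with (o) {
      m();   // found on the with object, so 'this' inside m is o
      g();   // found in the outer scope; the "hole" receiver means the global receiver is used
    }
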
@@ -8836,37 +8840,45 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
&binding_flags);
if (index >= 0) {
- // The property was found in a context slot.
- Handle<Context> context = Handle<Context>::cast(holder);
- if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
- context->get(index)->IsTheHole()) {
- Handle<Object> error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- return isolate->Throw(*error);
- }
- // Ignore if read_only variable.
- if ((attributes & READ_ONLY) == 0) {
- // Context is a fixed array and set cannot fail.
- context->set(index, *value);
- } else if (strict_mode == kStrictMode) {
- // Setting read only property in strict mode.
- Handle<Object> error =
- isolate->factory()->NewTypeError("strict_cannot_assign",
- HandleVector(&name, 1));
- return isolate->Throw(*error);
+ if (holder->IsContext()) {
+ Handle<Context> context = Handle<Context>::cast(holder);
+ if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
+ context->get(index)->IsTheHole()) {
+ Handle<Object> error =
+ isolate->factory()->NewReferenceError("not_defined",
+ HandleVector(&name, 1));
+ return isolate->Throw(*error);
+ }
+ // Ignore if read_only variable.
+ if ((attributes & READ_ONLY) == 0) {
+ // Context is a fixed array and set cannot fail.
+ context->set(index, *value);
+ } else if (strict_mode == kStrictMode) {
+ // Setting read only property in strict mode.
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("strict_cannot_assign",
+ HandleVector(&name, 1));
+ return isolate->Throw(*error);
+ }
+ } else {
+ ASSERT((attributes & READ_ONLY) == 0);
+ Handle<Object> result =
+ SetElement(Handle<JSObject>::cast(holder), index, value, strict_mode);
+ if (result.is_null()) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
}
return *value;
}
- // Slow case: The property is not in a context slot. It is either in a
- // context extension object, a property of the subject of a with, or a
- // property of the global object.
- Handle<JSObject> object;
+ // Slow case: The property is not in a FixedArray context.
+ // It is either in a JSObject extension context or it was not found.
+ Handle<JSObject> context_ext;
if (!holder.is_null()) {
- // The property exists on the holder.
- object = Handle<JSObject>::cast(holder);
+ // The property exists in the extension context.
+ context_ext = Handle<JSObject>::cast(holder);
} else {
// The property was not found.
ASSERT(attributes == ABSENT);
@@ -8874,21 +8886,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
if (strict_mode == kStrictMode) {
// Throw in strict mode (assignment to undefined variable).
Handle<Object> error =
- isolate->factory()->NewReferenceError(
- "not_defined", HandleVector(&name, 1));
+ isolate->factory()->NewReferenceError(
+ "not_defined", HandleVector(&name, 1));
return isolate->Throw(*error);
}
- // In non-strict mode, the property is added to the global object.
+ // In non-strict mode, the property is stored in the global context.
attributes = NONE;
- object = Handle<JSObject>(isolate->context()->global());
+ context_ext = Handle<JSObject>(isolate->context()->global());
}
- // Set the property if it's not read only or doesn't yet exist.
+ // Set the property, but ignore if read_only variable on the context
+ // extension object itself.
if ((attributes & READ_ONLY) == 0 ||
- (object->GetLocalPropertyAttribute(*name) == ABSENT)) {
+ (context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) {
RETURN_IF_EMPTY_HANDLE(
isolate,
- SetProperty(object, name, value, NONE, strict_mode));
+ SetProperty(context_ext, name, value, NONE, strict_mode));
} else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
// Setting read only property in strict mode.
Handle<Object> error =
@@ -9108,10 +9121,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
FlattenString(str);
CONVERT_ARG_CHECKED(JSArray, output, 1);
-
- MaybeObject* maybe_result_array =
- output->EnsureCanContainNonSmiElements();
- if (maybe_result_array->IsFailure()) return maybe_result_array;
RUNTIME_ASSERT(output->HasFastElements());
AssertNoAllocation no_allocation;
@@ -9297,9 +9306,6 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
PropertyAttributes attributes = ABSENT;
BindingFlags binding_flags;
while (true) {
- // Don't follow context chains in Context::Lookup and implement the loop
- // up the context chain here, so that we can know the context where eval
- // was found.
receiver = context->Lookup(isolate->factory()->eval_symbol(),
FOLLOW_PROTOTYPE_CHAIN,
&index,
@@ -9415,7 +9421,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, array, args[0]);
CONVERT_CHECKED(JSObject, element, args[1]);
- RUNTIME_ASSERT(array->HasFastElements() || array->HasFastSmiOnlyElements());
+ RUNTIME_ASSERT(array->HasFastElements());
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
for (int i = 0; i < length; i++) {
@@ -9498,11 +9504,9 @@ class ArrayConcatVisitor {
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
Handle<Map> map;
if (fast_elements_) {
- map = isolate_->factory()->GetElementsTransitionMap(array,
- FAST_ELEMENTS);
+ map = isolate_->factory()->GetFastElementsMap(Handle<Map>(array->map()));
} else {
- map = isolate_->factory()->GetElementsTransitionMap(array,
- DICTIONARY_ELEMENTS);
+ map = isolate_->factory()->GetSlowElementsMap(Handle<Map>(array->map()));
}
array->set_map(*map);
array->set_length(*length);
@@ -9646,7 +9650,6 @@ static void CollectElementIndices(Handle<JSObject> object,
List<uint32_t>* indices) {
ElementsKind kind = object->GetElementsKind();
switch (kind) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
Handle<FixedArray> elements(FixedArray::cast(object->elements()));
uint32_t length = static_cast<uint32_t>(elements->length());
@@ -9766,7 +9769,6 @@ static bool IterateElements(Isolate* isolate,
ArrayConcatVisitor* visitor) {
uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
switch (receiver->GetElementsKind()) {
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
@@ -9995,17 +9997,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
CONVERT_CHECKED(JSArray, to, args[1]);
FixedArrayBase* new_elements = from->elements();
MaybeObject* maybe_new_map;
- ElementsKind elements_kind;
if (new_elements->map() == isolate->heap()->fixed_array_map() ||
new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
- elements_kind = FAST_ELEMENTS;
+ maybe_new_map = to->map()->GetFastElementsMap();
} else if (new_elements->map() ==
isolate->heap()->fixed_double_array_map()) {
- elements_kind = FAST_DOUBLE_ELEMENTS;
+ maybe_new_map = to->map()->GetFastDoubleElementsMap();
} else {
- elements_kind = DICTIONARY_ELEMENTS;
+ maybe_new_map = to->map()->GetSlowElementsMap();
}
- maybe_new_map = to->GetElementsTransitionMap(elements_kind);
Object* new_map;
if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
to->set_map(Map::cast(new_map));
@@ -10090,9 +10090,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
}
return *isolate->factory()->NewJSArrayWithElements(keys);
} else {
- ASSERT(array->HasFastElements() ||
- array->HasFastSmiOnlyElements() ||
- array->HasFastDoubleElements());
+ ASSERT(array->HasFastElements() || array->HasFastDoubleElements());
Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
// -1 means start of array.
single_interval->set(0, Smi::FromInt(-1));
@@ -10211,8 +10209,8 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
case CALLBACKS: {
Object* structure = result->GetCallbackObject();
if (structure->IsForeign() || structure->IsAccessorInfo()) {
- MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
- receiver, structure, name);
+ MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
+ receiver, structure, name, result->holder());
if (!maybe_value->ToObject(&value)) {
if (maybe_value->IsRetryAfterGC()) return maybe_value;
ASSERT(maybe_value->IsException());
@@ -11462,53 +11460,48 @@ Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate,
int target_start_position = RelocInfo::kNoPosition;
Handle<SharedFunctionInfo> target;
while (!done) {
- { // Extra scope for iterator and no-allocation.
- isolate->heap()->EnsureHeapIsIterable();
- AssertNoAllocation no_alloc_during_heap_iteration;
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next();
- obj != NULL; obj = iterator.next()) {
- if (obj->IsSharedFunctionInfo()) {
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
- if (shared->script() == *script) {
- // If the SharedFunctionInfo found has the requested script data and
- // contains the source position it is a candidate.
- int start_position = shared->function_token_position();
- if (start_position == RelocInfo::kNoPosition) {
- start_position = shared->start_position();
- }
- if (start_position <= position &&
- position <= shared->end_position()) {
- // If there is no candidate or this function is within the current
- // candidate this is the new candidate.
- if (target.is_null()) {
- target_start_position = start_position;
- target = shared;
- } else {
- if (target_start_position == start_position &&
- shared->end_position() == target->end_position()) {
- // If a top-level function contain only one function
- // declartion the source for the top-level and the
- // function is the same. In that case prefer the non
- // top-level function.
- if (!shared->is_toplevel()) {
- target_start_position = start_position;
- target = shared;
- }
- } else if (target_start_position <= start_position &&
- shared->end_position() <= target->end_position()) {
- // This containment check includes equality as a function
- // inside a top-level function can share either start or end
- // position with the top-level function.
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next();
+ obj != NULL; obj = iterator.next()) {
+ if (obj->IsSharedFunctionInfo()) {
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
+ if (shared->script() == *script) {
+ // If the SharedFunctionInfo found has the requested script data and
+ // contains the source position it is a candidate.
+ int start_position = shared->function_token_position();
+ if (start_position == RelocInfo::kNoPosition) {
+ start_position = shared->start_position();
+ }
+ if (start_position <= position &&
+ position <= shared->end_position()) {
+ // If there is no candidate or this function is within the current
+ // candidate this is the new candidate.
+ if (target.is_null()) {
+ target_start_position = start_position;
+ target = shared;
+ } else {
+ if (target_start_position == start_position &&
+ shared->end_position() == target->end_position()) {
+ // If a top-level function contains only one function
+ // declaration the source for the top-level and the function is
+ // the same. In that case prefer the non top-level function.
+ if (!shared->is_toplevel()) {
target_start_position = start_position;
target = shared;
}
+ } else if (target_start_position <= start_position &&
+ shared->end_position() <= target->end_position()) {
+ // This containment check includes equality as a function inside
+ // a top-level function can share either start or end position
+ // with the top-level function.
+ target_start_position = start_position;
+ target = shared;
}
}
}
}
- } // End for loop.
- } // End No allocation scope.
+ }
+ }
if (target.is_null()) {
return isolate->heap()->undefined_value();
@@ -11523,7 +11516,7 @@ Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate,
// functions which might contain the requested source position.
CompileLazyShared(target, KEEP_EXCEPTION);
}
- } // End while loop.
+ }
return *target;
}
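
The candidate-selection comments above boil down to: for a given source position, prefer the innermost function whose source range contains it. Roughly, in JavaScript terms (illustrative code):

    function outer() {
      function inner() {
        return 42;   // a position here lies inside the script, outer and inner;
      }              // the loop above settles on inner, the tightest non-top-level candidate
      return inner();
    }
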
@@ -11889,13 +11882,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
&sinfo, function_context);
// Invoke the evaluation function and return the result.
- Handle<Object> argv[] = { arguments, source };
+ const int argc = 2;
+ Object** argv[argc] = { arguments.location(),
+ Handle<Object>::cast(source).location() };
Handle<Object> result =
- Execution::Call(Handle<JSFunction>::cast(evaluation_function),
- receiver,
- ARRAY_SIZE(argv),
- argv,
- &has_pending_exception);
+ Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver,
+ argc, argv, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
// Skip the global proxy as it has no properties and always delegates to the
@@ -11974,8 +11966,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
Handle<Object> result =
Execution::Call(compiled_function, receiver, 0, NULL,
&has_pending_exception);
- // Clear the oneshot breakpoints so that the debugger does not step further.
- isolate->debug()->ClearStepping();
if (has_pending_exception) return Failure::Exception();
return *result;
}
@@ -12003,14 +11993,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) {
// Return result as a JS array.
Handle<JSObject> result =
isolate->factory()->NewJSObject(isolate->array_function());
- isolate->factory()->SetContent(Handle<JSArray>::cast(result), instances);
+ Handle<JSArray>::cast(result)->SetContent(*instances);
return *result;
}
// Helper function used by Runtime_DebugReferencedBy below.
-static int DebugReferencedBy(HeapIterator* iterator,
- JSObject* target,
+static int DebugReferencedBy(JSObject* target,
Object* instance_filter, int max_references,
FixedArray* instances, int instances_size,
JSFunction* arguments_function) {
@@ -12020,8 +12009,9 @@ static int DebugReferencedBy(HeapIterator* iterator,
// Iterate the heap.
int count = 0;
JSObject* last = NULL;
+ HeapIterator iterator;
HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator->next()) != NULL) &&
+ while (((heap_obj = iterator.next()) != NULL) &&
(max_references == 0 || count < max_references)) {
// Only look at all JSObjects.
if (heap_obj->IsJSObject()) {
@@ -12086,11 +12076,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
- isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- // The heap iterator reserves the right to do a GC to make the heap iterable.
- // Due to the GC above we know it won't need to do that, but it seems cleaner
- // to get the heap iterator constructed before we start having unprotected
- // Object* locals that are not protected by handles.
+ isolate->heap()->CollectAllGarbage(false);
// Check parameters.
CONVERT_CHECKED(JSObject, target, args[0]);
@@ -12100,7 +12086,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
RUNTIME_ASSERT(max_references >= 0);
-
// Get the constructor function for context extension and arguments array.
JSObject* arguments_boilerplate =
isolate->context()->global_context()->arguments_boilerplate();
@@ -12109,9 +12094,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
// Get the number of referencing objects.
int count;
- HeapIterator heap_iterator;
- count = DebugReferencedBy(&heap_iterator,
- target, instance_filter, max_references,
+ count = DebugReferencedBy(target, instance_filter, max_references,
NULL, 0, arguments_function);
// Allocate an array to hold the result.
@@ -12122,34 +12105,30 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
FixedArray* instances = FixedArray::cast(object);
// Fill the referencing objects.
- // AllocateFixedArray above does not make the heap non-iterable.
- ASSERT(HEAP->IsHeapIterable());
- HeapIterator heap_iterator2;
- count = DebugReferencedBy(&heap_iterator2,
- target, instance_filter, max_references,
+ count = DebugReferencedBy(target, instance_filter, max_references,
instances, count, arguments_function);
// Return result as JS array.
Object* result;
- MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+ { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
isolate->context()->global_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- return JSArray::cast(result)->SetContent(instances);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ JSArray::cast(result)->SetContent(instances);
+ return result;
}
// Helper function used by Runtime_DebugConstructedBy below.
-static int DebugConstructedBy(HeapIterator* iterator,
- JSFunction* constructor,
- int max_references,
- FixedArray* instances,
- int instances_size) {
+static int DebugConstructedBy(JSFunction* constructor, int max_references,
+ FixedArray* instances, int instances_size) {
AssertNoAllocation no_alloc;
// Iterate the heap.
int count = 0;
+ HeapIterator iterator;
HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator->next()) != NULL) &&
+ while (((heap_obj = iterator.next()) != NULL) &&
(max_references == 0 || count < max_references)) {
// Only look at all JSObjects.
if (heap_obj->IsJSObject()) {
@@ -12177,7 +12156,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
- isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ isolate->heap()->CollectAllGarbage(false);
// Check parameters.
CONVERT_CHECKED(JSFunction, constructor, args[0]);
@@ -12186,12 +12165,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Get the number of referencing objects.
int count;
- HeapIterator heap_iterator;
- count = DebugConstructedBy(&heap_iterator,
- constructor,
- max_references,
- NULL,
- 0);
+ count = DebugConstructedBy(constructor, max_references, NULL, 0);
// Allocate an array to hold the result.
Object* object;
@@ -12200,14 +12174,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
}
FixedArray* instances = FixedArray::cast(object);
- ASSERT(HEAP->IsHeapIterable());
// Fill the referencing objects.
- HeapIterator heap_iterator2;
- count = DebugConstructedBy(&heap_iterator2,
- constructor,
- max_references,
- instances,
- count);
+ count = DebugConstructedBy(constructor, max_references, instances, count);
// Return result as JS array.
Object* result;
@@ -12215,7 +12183,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
isolate->context()->global_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- return JSArray::cast(result)->SetContent(instances);
+ JSArray::cast(result)->SetContent(instances);
+ return result;
}
@@ -12279,15 +12248,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
}
-static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
- Script* script,
+static int FindSharedFunctionInfosForScript(Script* script,
FixedArray* buffer) {
AssertNoAllocation no_allocations;
+
int counter = 0;
int buffer_size = buffer->length();
- for (HeapObject* obj = iterator->next();
- obj != NULL;
- obj = iterator->next()) {
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
ASSERT(obj != NULL);
if (!obj->IsSharedFunctionInfo()) {
continue;
@@ -12313,30 +12281,16 @@ RUNTIME_FUNCTION(MaybeObject*,
HandleScope scope(isolate);
CONVERT_CHECKED(JSValue, script_value, args[0]);
-
Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
const int kBufferSize = 32;
Handle<FixedArray> array;
array = isolate->factory()->NewFixedArray(kBufferSize);
- int number;
- {
- isolate->heap()->EnsureHeapIsIterable();
- AssertNoAllocation no_allocations;
- HeapIterator heap_iterator;
- Script* scr = *script;
- FixedArray* arr = *array;
- number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
- }
+ int number = FindSharedFunctionInfosForScript(*script, *array);
if (number > kBufferSize) {
array = isolate->factory()->NewFixedArray(number);
- isolate->heap()->EnsureHeapIsIterable();
- AssertNoAllocation no_allocations;
- HeapIterator heap_iterator;
- Script* scr = *script;
- FixedArray* arr = *array;
- FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
+ FindSharedFunctionInfosForScript(*script, *array);
}
Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array);
@@ -12817,8 +12771,6 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
// Scan the heap for Script objects to find the script with the requested
// script data.
Handle<Script> script;
- script_name->GetHeap()->EnsureHeapIsIterable();
- AssertNoAllocation no_allocation_during_heap_iteration;
HeapIterator iterator;
HeapObject* obj = NULL;
while (script.is_null() && ((obj = iterator.next()) != NULL)) {
@@ -13030,11 +12982,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
// TODO(antonm): consider passing a receiver when constructing a cache.
Handle<Object> receiver(isolate->global_context()->global());
// This handle is neither shared nor used later, so it's safe.
- Handle<Object> argv[] = { key_handle };
- bool pending_exception;
+ Object** argv[] = { key_handle.location() };
+ bool pending_exception = false;
value = Execution::Call(factory,
receiver,
- ARRAY_SIZE(argv),
+ 1,
argv,
&pending_exception);
if (pending_exception) return Failure::Exception();
@@ -13187,7 +13139,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
return isolate->heap()->ToBoolean(obj->Has##Name()); \
}
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOnlyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
@@ -13204,14 +13155,6 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements)
#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSObject, obj1, args[0]);
- CONVERT_CHECKED(JSObject, obj2, args[1]);
- return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
-}
-
// ----------------------------------------------------------------------------
// Implementation of Runtime
@@ -13279,9 +13222,6 @@ void Runtime::PerformGC(Object* result) {
Isolate* isolate = Isolate::Current();
Failure* failure = Failure::cast(result);
if (failure->IsRetryAfterGC()) {
- if (isolate->heap()->new_space()->AddFreshPage()) {
- return;
- }
// Try to do a garbage collection; ignore it if it fails. The C
// entry stub will throw an out-of-memory exception in that case.
isolate->heap()->CollectGarbage(failure->allocation_space());
@@ -13289,7 +13229,7 @@ void Runtime::PerformGC(Object* result) {
// Handle last resort GC and make sure to allow future allocations
// to grow the heap without causing GCs (if possible).
isolate->counters()->gc_last_resort_from_js()->Increment();
- isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->heap()->CollectAllGarbage(false);
}
}
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index ed9c2b8891..1538b7d846 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -330,8 +330,6 @@ namespace internal {
F(InitializeConstContextSlot, 3, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
\
- /* Arrays */ \
- F(NonSmiElementStored, 1, 1) \
/* Debugging */ \
F(DebugPrint, 1, 1) \
F(DebugTrace, 0, 1) \
@@ -356,7 +354,6 @@ namespace internal {
F(IS_VAR, 1, 1) \
\
/* expose boolean functions from objects-inl.h */ \
- F(HasFastSmiOnlyElements, 1, 1) \
F(HasFastElements, 1, 1) \
F(HasFastDoubleElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
@@ -370,7 +367,6 @@ namespace internal {
F(HasExternalUnsignedIntElements, 1, 1) \
F(HasExternalFloatElements, 1, 1) \
F(HasExternalDoubleElements, 1, 1) \
- F(HaveSameMap, 2, 1) \
/* profiler */ \
F(ProfilerResume, 0, 1) \
F(ProfilerPause, 0, 1)
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index a12f6c7b09..14ff1b69cf 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -355,7 +355,7 @@ function IN(x) {
if (!IS_SPEC_OBJECT(x)) {
throw %MakeTypeError('invalid_in_operator_use', [this, x]);
}
- return %_IsNonNegativeSmi(this) ?
+ return %_IsNonNegativeSmi(this) && !%IsJSProxy(x) ?
%HasElement(x, this) : %HasProperty(x, %ToString(this));
}
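
The guard being dropped here only matters for proxies; the remaining logic is the usual split between element and named lookups. A quick sketch of what IN dispatches on (plain objects; the newer code additionally skipped the fast path when x was a proxy):

    var a = ["x", "y"];
    0 in a;          // true  — a small non-negative integer key takes the %HasElement path
    "length" in a;   // true  — any other key goes through %HasProperty(x, %ToString(this))
    2 in a;          // false — index past the end of the array
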
@@ -429,10 +429,20 @@ function CALL_FUNCTION_PROXY() {
}
-function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR() {
- var proxy = this;
+function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR(proxy) {
+ var arity = %_ArgumentsLength() - 1;
var trap = %GetConstructTrap(proxy);
- return %Apply(trap, this, arguments, 0, %_ArgumentsLength());
+ var receiver = void 0;
+ if (!IS_UNDEFINED(trap)) {
+ trap = %GetCallTrap(proxy);
+ var proto = proxy.prototype;
+ if (!IS_SPEC_OBJECT(proto) && proto !== null) {
+ throw MakeTypeError("proto_object_or_null", [proto]);
+ }
+ receiver = new global.Object();
+ receiver.__proto__ = proto;
+ }
+ return %Apply(trap, this, arguments, 1, arity);
}
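
What the restored trampoline does by hand — allocate a receiver, point its __proto__ at the callee's .prototype, then invoke the call trap — is ordinary [[Construct]] wiring. The same shape with a plain function (illustrative, no proxies involved):

    function C() { this.tag = "built"; }
    C.prototype = { kind: "c" };
    var obj = new C();   // the engine allocates a receiver whose prototype is C.prototype
    // obj.kind === "c", obj.tag === "built" — the wiring the proxy path sets up via __proto__
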
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 95748f2417..69ea8ae6e7 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -95,7 +95,7 @@ uc32 Scanner::ScanHexNumber(int expected_length) {
JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_contants)
: Scanner(scanner_contants),
octal_pos_(Location::invalid()),
- harmony_scoping_(false) { }
+ harmony_block_scoping_(false) { }
void JavaScriptScanner::Initialize(UC16CharacterStream* source) {
@@ -872,7 +872,7 @@ uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
KEYWORD("instanceof", Token::INSTANCEOF) \
KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
KEYWORD_GROUP('l') \
- KEYWORD("let", harmony_scoping \
+ KEYWORD("let", harmony_block_scoping \
? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
KEYWORD_GROUP('n') \
KEYWORD("new", Token::NEW) \
@@ -906,7 +906,7 @@ uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
static Token::Value KeywordOrIdentifierToken(const char* input,
int input_length,
- bool harmony_scoping) {
+ bool harmony_block_scoping) {
ASSERT(input_length >= 1);
const int kMinLength = 2;
const int kMaxLength = 10;
@@ -982,7 +982,7 @@ Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
Vector<const char> chars = next_.literal_chars->ascii_literal();
return KeywordOrIdentifierToken(chars.start(),
chars.length(),
- harmony_scoping_);
+ harmony_block_scoping_);
}
return Token::IDENTIFIER;
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 6651c38755..16c3a427c9 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -509,11 +509,11 @@ class JavaScriptScanner : public Scanner {
// tokens, which is what it is used for.
void SeekForward(int pos);
- bool HarmonyScoping() const {
- return harmony_scoping_;
+ bool HarmonyBlockScoping() const {
+ return harmony_block_scoping_;
}
- void SetHarmonyScoping(bool block_scoping) {
- harmony_scoping_ = block_scoping;
+ void SetHarmonyBlockScoping(bool block_scoping) {
+ harmony_block_scoping_ = block_scoping;
}
@@ -556,7 +556,7 @@ class JavaScriptScanner : public Scanner {
bool has_multiline_comment_before_next_;
// Whether we scan 'let' as a keyword for harmony block scoped
// let bindings.
- bool harmony_scoping_;
+ bool harmony_block_scoping_;
};
} } // namespace v8::internal
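
The rename only affects when 'let' is treated as a keyword. The observable effect, sketched in JavaScript (the flag name is inferred from the field name and may differ on the command line):

    var let = 1;    // without the block-scoping flag, 'let' scans as an ordinary identifier
    // with the harmony block-scoping flag enabled, the scanner returns Token::LET instead:
    // let x = 2;   // a block-scoped binding
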
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 1aa51603db..ad31ca47c6 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -138,7 +138,7 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
context_modes_.length());
context_slots_.Add(FACTORY->empty_symbol());
- context_modes_.Add(INTERNAL);
+ context_modes_.Add(Variable::INTERNAL);
}
}
}
@@ -216,7 +216,7 @@ static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) {
template <class Allocator>
static Object** ReadList(Object** p,
List<Handle<String>, Allocator>* list,
- List<VariableMode, Allocator>* modes) {
+ List<Variable::Mode, Allocator>* modes) {
ASSERT(list->is_empty());
int n;
p = ReadInt(p, &n);
@@ -226,7 +226,7 @@ static Object** ReadList(Object** p,
p = ReadSymbol(p, &s);
p = ReadInt(p, &m);
list->Add(s);
- modes->Add(static_cast<VariableMode>(m));
+ modes->Add(static_cast<Variable::Mode>(m));
}
return p;
}
@@ -285,7 +285,7 @@ static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) {
template <class Allocator>
static Object** WriteList(Object** p,
List<Handle<String>, Allocator>* list,
- List<VariableMode, Allocator>* modes) {
+ List<Variable::Mode, Allocator>* modes) {
const int n = list->length();
p = WriteInt(p, n);
for (int i = 0; i < n; i++) {
@@ -456,7 +456,7 @@ int SerializedScopeInfo::StackSlotIndex(String* name) {
return -1;
}
-int SerializedScopeInfo::ContextSlotIndex(String* name, VariableMode* mode) {
+int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) {
ASSERT(name->IsSymbol());
Isolate* isolate = GetIsolate();
int result = isolate->context_slot_cache()->Lookup(this, name, mode);
@@ -473,7 +473,7 @@ int SerializedScopeInfo::ContextSlotIndex(String* name, VariableMode* mode) {
ASSERT(((p - p0) & 1) == 0);
int v;
ReadInt(p + 1, &v);
- VariableMode mode_value = static_cast<VariableMode>(v);
+ Variable::Mode mode_value = static_cast<Variable::Mode>(v);
if (mode != NULL) *mode = mode_value;
result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
isolate->context_slot_cache()->Update(this, name, mode_value, result);
@@ -482,7 +482,7 @@ int SerializedScopeInfo::ContextSlotIndex(String* name, VariableMode* mode) {
p += 2;
}
}
- isolate->context_slot_cache()->Update(this, name, INTERNAL, -1);
+ isolate->context_slot_cache()->Update(this, name, Variable::INTERNAL, -1);
return -1;
}
@@ -540,7 +540,7 @@ int ContextSlotCache::Hash(Object* data, String* name) {
int ContextSlotCache::Lookup(Object* data,
String* name,
- VariableMode* mode) {
+ Variable::Mode* mode) {
int index = Hash(data, name);
Key& key = keys_[index];
if ((key.data == data) && key.name->Equals(name)) {
@@ -554,7 +554,7 @@ int ContextSlotCache::Lookup(Object* data,
void ContextSlotCache::Update(Object* data,
String* name,
- VariableMode mode,
+ Variable::Mode mode,
int slot_index) {
String* symbol;
ASSERT(slot_index > kNotFound);
@@ -581,7 +581,7 @@ void ContextSlotCache::Clear() {
void ContextSlotCache::ValidateEntry(Object* data,
String* name,
- VariableMode mode,
+ Variable::Mode mode,
int slot_index) {
String* symbol;
if (HEAP->LookupSymbolIfExists(name, &symbol)) {
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index 03f321be7d..40c5c8a687 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -97,7 +97,70 @@ class ScopeInfo BASE_EMBEDDED {
List<Handle<String>, Allocator > parameters_;
List<Handle<String>, Allocator > stack_slots_;
List<Handle<String>, Allocator > context_slots_;
- List<VariableMode, Allocator > context_modes_;
+ List<Variable::Mode, Allocator > context_modes_;
+};
+
+
+// This object provides quick access to scope info details for runtime
+// routines w/o the need to explicitly create a ScopeInfo object.
+class SerializedScopeInfo : public FixedArray {
+ public :
+
+ static SerializedScopeInfo* cast(Object* object) {
+ ASSERT(object->IsSerializedScopeInfo());
+ return reinterpret_cast<SerializedScopeInfo*>(object);
+ }
+
+ // Does this scope call eval?
+ bool CallsEval();
+
+ // Is this scope a strict mode scope?
+ bool IsStrictMode();
+
+ // Return the number of stack slots for code.
+ int NumberOfStackSlots();
+
+ // Return the number of context slots for code.
+ int NumberOfContextSlots();
+
+ // Return whether this has context slots besides MIN_CONTEXT_SLOTS.
+ bool HasHeapAllocatedLocals();
+
+ // Lookup support for serialized scope info. Returns the
+ // stack slot index for a given slot name if the slot is
+ // present; otherwise returns a value < 0. The name must be a symbol
+ // (canonicalized).
+ int StackSlotIndex(String* name);
+
+ // Lookup support for serialized scope info. Returns the
+ // context slot index for a given slot name if the slot is present; otherwise
+ // returns a value < 0. The name must be a symbol (canonicalized).
+ // If the slot is present and mode != NULL, sets *mode to the corresponding
+ // mode for that variable.
+ int ContextSlotIndex(String* name, Variable::Mode* mode);
+
+ // Lookup support for serialized scope info. Returns the
+ // parameter index for a given parameter name if the parameter is present;
+ // otherwise returns a value < 0. The name must be a symbol (canonicalized).
+ int ParameterIndex(String* name);
+
+ // Lookup support for serialized scope info. Returns the
+ // function context slot index if the function name is present (named
+ // function expressions, only), otherwise returns a value < 0. The name
+ // must be a symbol (canonicalized).
+ int FunctionContextSlotIndex(String* name);
+
+ static Handle<SerializedScopeInfo> Create(Scope* scope);
+
+ // Serializes empty scope info.
+ static SerializedScopeInfo* Empty();
+
+ private:
+ inline Object** ContextEntriesAddr();
+
+ inline Object** ParameterEntriesAddr();
+
+ inline Object** StackSlotEntriesAddr();
};
@@ -111,12 +174,12 @@ class ContextSlotCache {
// If absent, kNotFound is returned.
int Lookup(Object* data,
String* name,
- VariableMode* mode);
+ Variable::Mode* mode);
// Update an element in the cache.
void Update(Object* data,
String* name,
- VariableMode mode,
+ Variable::Mode mode,
int slot_index);
// Clear the cache.
@@ -138,7 +201,7 @@ class ContextSlotCache {
#ifdef DEBUG
void ValidateEntry(Object* data,
String* name,
- VariableMode mode,
+ Variable::Mode mode,
int slot_index);
#endif
@@ -149,7 +212,7 @@ class ContextSlotCache {
};
struct Value {
- Value(VariableMode mode, int index) {
+ Value(Variable::Mode mode, int index) {
ASSERT(ModeField::is_valid(mode));
ASSERT(IndexField::is_valid(index));
value_ = ModeField::encode(mode) | IndexField::encode(index);
@@ -161,14 +224,14 @@ class ContextSlotCache {
uint32_t raw() { return value_; }
- VariableMode mode() { return ModeField::decode(value_); }
+ Variable::Mode mode() { return ModeField::decode(value_); }
int index() { return IndexField::decode(value_); }
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
- class ModeField: public BitField<VariableMode, 0, 3> {};
- class IndexField: public BitField<int, 3, 32-3> {};
+ class ModeField: public BitField<Variable::Mode, 0, 3> {};
+ class IndexField: public BitField<int, 3, 32-3> {};
private:
uint32_t value_;
};
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index e67b7f8267..d5a7a9f9ca 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -85,7 +85,7 @@ VariableMap::~VariableMap() {}
Variable* VariableMap::Declare(Scope* scope,
Handle<String> name,
- VariableMode mode,
+ Variable::Mode mode,
bool is_valid_lhs,
Variable::Kind kind) {
HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
@@ -179,7 +179,7 @@ Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name)
++num_var_or_const_;
Variable* variable = variables_.Declare(this,
catch_variable_name,
- VAR,
+ Variable::VAR,
true, // Valid left-hand side.
Variable::NORMAL);
AllocateHeapSlot(variable);
@@ -310,7 +310,7 @@ void Scope::Initialize(bool inside_with) {
Variable* var =
variables_.Declare(this,
isolate_->factory()->this_symbol(),
- VAR,
+ Variable::VAR,
false,
Variable::THIS);
var->AllocateTo(Variable::PARAMETER, -1);
@@ -323,7 +323,7 @@ void Scope::Initialize(bool inside_with) {
// allocated during variable allocation.
variables_.Declare(this,
isolate_->factory()->arguments_symbol(),
- VAR,
+ Variable::VAR,
true,
Variable::ARGUMENTS);
}
@@ -373,11 +373,11 @@ Variable* Scope::LocalLookup(Handle<String> name) {
ASSERT(scope_info_->StackSlotIndex(*name) < 0);
// Check context slot lookup.
- VariableMode mode;
+ Variable::Mode mode;
int index = scope_info_->ContextSlotIndex(*name, &mode);
if (index < 0) {
// Check parameters.
- mode = VAR;
+ mode = Variable::VAR;
index = scope_info_->ParameterIndex(*name);
if (index < 0) {
// Check the function name.
@@ -407,13 +407,13 @@ Variable* Scope::Lookup(Handle<String> name) {
Variable* Scope::DeclareFunctionVar(Handle<String> name) {
ASSERT(is_function_scope() && function_ == NULL);
Variable* function_var =
- new Variable(this, name, CONST, true, Variable::NORMAL);
+ new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
function_ = new(isolate_->zone()) VariableProxy(isolate_, function_var);
return function_var;
}
-void Scope::DeclareParameter(Handle<String> name, VariableMode mode) {
+void Scope::DeclareParameter(Handle<String> name, Variable::Mode mode) {
ASSERT(!already_resolved());
ASSERT(is_function_scope());
Variable* var =
@@ -422,12 +422,14 @@ void Scope::DeclareParameter(Handle<String> name, VariableMode mode) {
}
-Variable* Scope::DeclareLocal(Handle<String> name, VariableMode mode) {
+Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
ASSERT(!already_resolved());
// This function handles VAR and CONST modes. DYNAMIC variables are
// introduced during variable allocation, INTERNAL variables are allocated
// explicitly, and TEMPORARY variables are allocated via NewTemporary().
- ASSERT(mode == VAR || mode == CONST || mode == LET);
+ ASSERT(mode == Variable::VAR ||
+ mode == Variable::CONST ||
+ mode == Variable::LET);
++num_var_or_const_;
return variables_.Declare(this, name, mode, true, Variable::NORMAL);
}
@@ -435,7 +437,7 @@ Variable* Scope::DeclareLocal(Handle<String> name, VariableMode mode) {
Variable* Scope::DeclareGlobal(Handle<String> name) {
ASSERT(is_global_scope());
- return variables_.Declare(this, name, DYNAMIC_GLOBAL,
+ return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL,
true,
Variable::NORMAL);
}
@@ -471,7 +473,7 @@ Variable* Scope::NewTemporary(Handle<String> name) {
ASSERT(!already_resolved());
Variable* var = new Variable(this,
name,
- TEMPORARY,
+ Variable::TEMPORARY,
true,
Variable::NORMAL);
temps_.Add(var);
@@ -503,13 +505,13 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
int length = decls_.length();
for (int i = 0; i < length; i++) {
Declaration* decl = decls_[i];
- if (decl->mode() != VAR) continue;
+ if (decl->mode() != Variable::VAR) continue;
Handle<String> name = decl->proxy()->name();
bool cond = true;
for (Scope* scope = decl->scope(); cond ; scope = scope->outer_scope_) {
// There is a conflict if there exists a non-VAR binding.
Variable* other_var = scope->variables_.Lookup(name);
- if (other_var != NULL && other_var->mode() != VAR) {
+ if (other_var != NULL && other_var->mode() != Variable::VAR) {
return decl;
}
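
The conflict being detected is a VAR declaration that hoists across a non-VAR binding of the same name. In JavaScript terms (requires the era's block-scoping support for 'let'; current engines reject this at parse time):

    function f() {
      let x = 1;        // non-VAR binding
      { var x = 2; }    // this VAR declaration is the one CheckConflictingVarDeclarations returns
    }
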
@@ -777,9 +779,9 @@ void Scope::Print(int n) {
Indent(n1, "// dynamic vars\n");
if (dynamics_ != NULL) {
- PrintMap(n1, dynamics_->GetMap(DYNAMIC));
- PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL));
- PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL));
+ PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC));
+ PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_LOCAL));
+ PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_GLOBAL));
}
// Print inner scopes (disable by providing negative n).
@@ -795,7 +797,7 @@ void Scope::Print(int n) {
#endif // DEBUG
-Variable* Scope::NonLocal(Handle<String> name, VariableMode mode) {
+Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
if (dynamics_ == NULL) dynamics_ = new DynamicScopePart();
VariableMap* map = dynamics_->GetMap(mode);
Variable* var = map->Lookup(name);
@@ -901,7 +903,7 @@ void Scope::ResolveVariable(Scope* global_scope,
// Note that we must do a lookup anyway, because if we find one,
// we must mark that variable as potentially accessed from this
// inner scope (the property may not be in the 'with' object).
- var = NonLocal(proxy->name(), DYNAMIC);
+ var = NonLocal(proxy->name(), Variable::DYNAMIC);
} else {
// We are not inside a local 'with' statement.
@@ -924,13 +926,13 @@ void Scope::ResolveVariable(Scope* global_scope,
} else if (scope_inside_with_) {
// If we are inside a with statement we give up and look up
// the variable at runtime.
- var = NonLocal(proxy->name(), DYNAMIC);
+ var = NonLocal(proxy->name(), Variable::DYNAMIC);
} else if (invalidated_local != NULL) {
// No with statements are involved and we found a local
// variable that might be shadowed by eval introduced
// variables.
- var = NonLocal(proxy->name(), DYNAMIC_LOCAL);
+ var = NonLocal(proxy->name(), Variable::DYNAMIC_LOCAL);
var->set_local_if_not_shadowed(invalidated_local);
} else if (outer_scope_is_eval_scope_) {
@@ -940,10 +942,10 @@ void Scope::ResolveVariable(Scope* global_scope,
// variable is global if it is not shadowed by eval-introduced
// variables.
if (context->GlobalIfNotShadowedByEval(proxy->name())) {
- var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
+ var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
} else {
- var = NonLocal(proxy->name(), DYNAMIC);
+ var = NonLocal(proxy->name(), Variable::DYNAMIC);
}
} else {
@@ -951,7 +953,7 @@ void Scope::ResolveVariable(Scope* global_scope,
// is not executed with a call to eval. We know that this
// variable is global unless it is shadowed by eval-introduced
// variables.
- var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
+ var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
}
}
}
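
The DYNAMIC / DYNAMIC_LOCAL / DYNAMIC_GLOBAL classifications correspond to names that cannot be resolved statically, e.g. anything referenced inside a 'with' or next to an eval. A small sloppy-mode illustration:

    var x = "global";
    function f(o) {
      with (o) {
        return x;       // x cannot be bound at compile time: it may live on o or in the
      }                 // enclosing scope, so it is marked DYNAMIC and looked up at runtime
    }
    // f({}) === "global";  f({ x: "shadowed" }) === "shadowed"
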
@@ -1038,7 +1040,7 @@ bool Scope::MustAllocateInContext(Variable* var) {
//
// Exceptions: temporary variables are never allocated in a context;
// catch-bound variables are always allocated in a context.
- if (var->mode() == TEMPORARY) return false;
+ if (var->mode() == Variable::TEMPORARY) return false;
if (is_catch_scope() || is_block_scope()) return true;
return var->is_accessed_from_inner_scope() ||
scope_calls_eval_ ||
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 7e789b8bd0..2917a63bba 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -50,7 +50,7 @@ class VariableMap: public HashMap {
Variable* Declare(Scope* scope,
Handle<String> name,
- VariableMode mode,
+ Variable::Mode mode,
bool is_valid_lhs,
Variable::Kind kind);
@@ -64,8 +64,8 @@ class VariableMap: public HashMap {
// and setup time for scopes that don't need them.
class DynamicScopePart : public ZoneObject {
public:
- VariableMap* GetMap(VariableMode mode) {
- int index = mode - DYNAMIC;
+ VariableMap* GetMap(Variable::Mode mode) {
+ int index = mode - Variable::DYNAMIC;
ASSERT(index >= 0 && index < 3);
return &maps_[index];
}
@@ -135,11 +135,11 @@ class Scope: public ZoneObject {
// Declare a parameter in this scope. When there are duplicated
// parameters the rightmost one 'wins'. However, the implementation
// expects all parameters to be declared and from left to right.
- void DeclareParameter(Handle<String> name, VariableMode mode);
+ void DeclareParameter(Handle<String> name, Variable::Mode mode);
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
- Variable* DeclareLocal(Handle<String> name, VariableMode mode);
+ Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
// Declare an implicit global variable in this scope which must be a
// global scope. The variable was introduced (possibly from an inner
@@ -406,7 +406,7 @@ class Scope: public ZoneObject {
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
- Variable* NonLocal(Handle<String> name, VariableMode mode);
+ Variable* NonLocal(Handle<String> name, Variable::Mode mode);
// Variable resolution.
Variable* LookupRecursive(Handle<String> name,
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 84ab94a97d..ecb480a8f8 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -300,24 +300,12 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
RUNTIME_ENTRY,
4,
"HandleScope::DeleteExtensions");
- Add(ExternalReference::
- incremental_marking_record_write_function(isolate).address(),
- RUNTIME_ENTRY,
- 5,
- "IncrementalMarking::RecordWrite");
- Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
- RUNTIME_ENTRY,
- 6,
- "StoreBuffer::StoreBufferOverflow");
- Add(ExternalReference::
- incremental_evacuation_record_write_function(isolate).address(),
- RUNTIME_ENTRY,
- 7,
- "IncrementalMarking::RecordWrite");
-
-
// Miscellaneous
+ Add(ExternalReference::the_hole_value_location(isolate).address(),
+ UNCLASSIFIED,
+ 2,
+ "Factory::the_hole_value().location()");
Add(ExternalReference::roots_address(isolate).address(),
UNCLASSIFIED,
3,
@@ -363,133 +351,129 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
"Heap::always_allocate_scope_depth()");
Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
UNCLASSIFIED,
- 14,
+ 13,
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
UNCLASSIFIED,
- 15,
+ 14,
"Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
Add(ExternalReference::debug_break(isolate).address(),
UNCLASSIFIED,
- 16,
+ 15,
"Debug::Break()");
Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
UNCLASSIFIED,
- 17,
+ 16,
"Debug::step_in_fp_addr()");
#endif
Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
UNCLASSIFIED,
- 18,
+ 17,
"add_two_doubles");
Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
UNCLASSIFIED,
- 19,
+ 18,
"sub_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
UNCLASSIFIED,
- 20,
+ 19,
"mul_two_doubles");
Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
UNCLASSIFIED,
- 21,
+ 20,
"div_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
UNCLASSIFIED,
- 22,
+ 21,
"mod_two_doubles");
Add(ExternalReference::compare_doubles(isolate).address(),
UNCLASSIFIED,
- 23,
+ 22,
"compare_doubles");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
UNCLASSIFIED,
- 24,
+ 23,
"NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
UNCLASSIFIED,
- 25,
+ 24,
"RegExpMacroAssembler*::CheckStackGuardState()");
Add(ExternalReference::re_grow_stack(isolate).address(),
UNCLASSIFIED,
- 26,
+ 25,
"NativeRegExpMacroAssembler::GrowStack()");
Add(ExternalReference::re_word_character_map().address(),
UNCLASSIFIED,
- 27,
+ 26,
"NativeRegExpMacroAssembler::word_character_map");
#endif // V8_INTERPRETED_REGEXP
// Keyed lookup cache.
Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
UNCLASSIFIED,
- 28,
+ 27,
"KeyedLookupCache::keys()");
Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
UNCLASSIFIED,
- 29,
+ 28,
"KeyedLookupCache::field_offsets()");
Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
UNCLASSIFIED,
- 30,
+ 29,
"TranscendentalCache::caches()");
Add(ExternalReference::handle_scope_next_address().address(),
UNCLASSIFIED,
- 31,
+ 30,
"HandleScope::next");
Add(ExternalReference::handle_scope_limit_address().address(),
UNCLASSIFIED,
- 32,
+ 31,
"HandleScope::limit");
Add(ExternalReference::handle_scope_level_address().address(),
UNCLASSIFIED,
- 33,
+ 32,
"HandleScope::level");
Add(ExternalReference::new_deoptimizer_function(isolate).address(),
UNCLASSIFIED,
- 34,
+ 33,
"Deoptimizer::New()");
Add(ExternalReference::compute_output_frames_function(isolate).address(),
UNCLASSIFIED,
- 35,
+ 34,
"Deoptimizer::ComputeOutputFrames()");
Add(ExternalReference::address_of_min_int().address(),
UNCLASSIFIED,
- 36,
+ 35,
"LDoubleConstant::min_int");
Add(ExternalReference::address_of_one_half().address(),
UNCLASSIFIED,
- 37,
+ 36,
"LDoubleConstant::one_half");
Add(ExternalReference::isolate_address().address(),
UNCLASSIFIED,
- 38,
+ 37,
"isolate");
Add(ExternalReference::address_of_minus_zero().address(),
UNCLASSIFIED,
- 39,
+ 38,
"LDoubleConstant::minus_zero");
Add(ExternalReference::address_of_negative_infinity().address(),
UNCLASSIFIED,
- 40,
+ 39,
"LDoubleConstant::negative_infinity");
Add(ExternalReference::power_double_double_function(isolate).address(),
UNCLASSIFIED,
- 41,
+ 40,
"power_double_double_function");
Add(ExternalReference::power_double_int_function(isolate).address(),
UNCLASSIFIED,
- 42,
+ 41,
"power_double_int_function");
- Add(ExternalReference::store_buffer_top(isolate).address(),
- UNCLASSIFIED,
- 43,
- "store_buffer_top");
- Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
+ Add(ExternalReference::arguments_marker_location(isolate).address(),
UNCLASSIFIED,
- 44,
- "canonical_nan");
+ 42,
+ "Factory::arguments_marker().location()");
}
@@ -585,7 +569,6 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
maybe_new_allocation =
reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
}
- ASSERT(!maybe_new_allocation->IsFailure());
Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
HeapObject* new_object = HeapObject::cast(new_allocation);
address = new_object->address();
@@ -594,13 +577,14 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
ASSERT(SpaceIsLarge(space_index));
LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
Object* new_allocation;
- if (space_index == kLargeData || space_index == kLargeFixedArray) {
+ if (space_index == kLargeData) {
+ new_allocation = lo_space->AllocateRaw(size)->ToObjectUnchecked();
+ } else if (space_index == kLargeFixedArray) {
new_allocation =
- lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked();
+ lo_space->AllocateRawFixedArray(size)->ToObjectUnchecked();
} else {
ASSERT_EQ(kLargeCode, space_index);
- new_allocation =
- lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked();
+ new_allocation = lo_space->AllocateRawCode(size)->ToObjectUnchecked();
}
HeapObject* new_object = HeapObject::cast(new_allocation);
// Record all large objects in the same space.
@@ -645,7 +629,6 @@ HeapObject* Deserializer::GetAddressFromStart(int space) {
void Deserializer::Deserialize() {
isolate_ = Isolate::Current();
- ASSERT(isolate_ != NULL);
// Don't GC while deserializing - just expand the heap.
AlwaysAllocateScope always_allocate;
// Don't use the free lists while deserializing.
@@ -702,8 +685,9 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
// This routine writes the new object into the pointer provided and then
// returns true if the new object was in young space and false otherwise.
// The reason for this strange interface is that otherwise the object is
-// written very late, which means the FreeSpace map is not set up by the
-// time we need to use it to mark the space at the end of a page free.
+// written very late, which means the ByteArray map is not set up by the
+// time we need to use it to mark the space at the end of a page free (by
+// making it into a byte array).
void Deserializer::ReadObject(int space_number,
Space* space,
Object** write_back) {
@@ -774,9 +758,8 @@ void Deserializer::ReadChunk(Object** current,
if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
ASSIGN_DEST_SPACE(space_number) \
ReadObject(space_number, dest_space, current); \
- emit_write_barrier = (space_number == NEW_SPACE && \
- source_space != NEW_SPACE && \
- source_space != CELL_SPACE); \
+ emit_write_barrier = \
+ (space_number == NEW_SPACE && source_space != NEW_SPACE); \
} else { \
Object* new_object = NULL; /* May not be a real Object pointer. */ \
if (where == kNewObject) { \
@@ -795,16 +778,14 @@ void Deserializer::ReadChunk(Object** current,
Decode(reference_id); \
new_object = reinterpret_cast<Object*>(address); \
} else if (where == kBackref) { \
- emit_write_barrier = (space_number == NEW_SPACE && \
- source_space != NEW_SPACE && \
- source_space != CELL_SPACE); \
+ emit_write_barrier = \
+ (space_number == NEW_SPACE && source_space != NEW_SPACE); \
new_object = GetAddressFromEnd(data & kSpaceMask); \
} else { \
ASSERT(where == kFromStart); \
if (offset_from_start == kUnknownOffsetFromStart) { \
- emit_write_barrier = (space_number == NEW_SPACE && \
- source_space != NEW_SPACE && \
- source_space != CELL_SPACE); \
+ emit_write_barrier = \
+ (space_number == NEW_SPACE && source_space != NEW_SPACE); \
new_object = GetAddressFromStart(data & kSpaceMask); \
} else { \
Address object_address = pages_[space_number][0] + \
@@ -992,11 +973,6 @@ void Deserializer::ReadChunk(Object** current,
break;
}
- case kSkip: {
- current++;
- break;
- }
-
case kNativesStringResource: {
int index = source_->Get();
Vector<const char> source_vector = Natives::GetRawScriptSource(index);
@@ -1121,13 +1097,8 @@ void PartialSerializer::Serialize(Object** object) {
void Serializer::VisitPointers(Object** start, Object** end) {
- Isolate* isolate = Isolate::Current();
-
for (Object** current = start; current < end; current++) {
- if (reinterpret_cast<Address>(current) ==
- isolate->heap()->store_buffer()->TopAddress()) {
- sink_->Put(kSkip, "Skip");
- } else if ((*current)->IsSmi()) {
+ if ((*current)->IsSmi()) {
sink_->Put(kRawData, "RawData");
sink_->PutInt(kPointerSize, "length");
for (int i = 0; i < kPointerSize; i++) {
@@ -1449,7 +1420,7 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
if (!source->IsUndefined()) {
ExternalAsciiString* string = ExternalAsciiString::cast(source);
typedef v8::String::ExternalAsciiStringResource Resource;
- const Resource* resource = string->resource();
+ Resource* resource = string->resource();
if (resource == *resource_pointer) {
sink_->Put(kNativesStringResource, "NativesStringResource");
sink_->PutSection(i, "NativesStringResourceEnd");
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index c070923326..66d6fb5111 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -238,8 +238,7 @@ class SerializerDeserializer: public ObjectVisitor {
kRootArray = 0x9, // Object is found in root array.
kPartialSnapshotCache = 0xa, // Object is in the cache.
kExternalReference = 0xb, // Pointer to an external reference.
- kSkip = 0xc, // Skip a pointer sized cell.
- // 0xd-0xf Free.
+ // 0xc-0xf Free.
kBackref = 0x10, // Object is described relative to end.
// 0x11-0x18 One per space.
// 0x19-0x1f Common backref offsets.
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index d9e6053ad6..35d7224099 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,213 +37,355 @@ namespace internal {
// -----------------------------------------------------------------------------
-// Bitmap
+// PageIterator
-void Bitmap::Clear(MemoryChunk* chunk) {
- Bitmap* bitmap = chunk->markbits();
- for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
- chunk->ResetLiveBytes();
+bool PageIterator::has_next() {
+ return prev_page_ != stop_page_;
+}
+
+
+Page* PageIterator::next() {
+ ASSERT(has_next());
+ prev_page_ = (prev_page_ == NULL)
+ ? space_->first_page_
+ : prev_page_->next_page();
+ return prev_page_;
}
// -----------------------------------------------------------------------------
-// PageIterator
+// Page
+Page* Page::next_page() {
+ return heap_->isolate()->memory_allocator()->GetNextPage(this);
+}
-PageIterator::PageIterator(PagedSpace* space)
- : space_(space),
- prev_page_(&space->anchor_),
- next_page_(prev_page_->next_page()) { }
+Address Page::AllocationTop() {
+ PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
+ return owner->PageAllocationTop(this);
+}
-bool PageIterator::has_next() {
- return next_page_ != &space_->anchor_;
+
+Address Page::AllocationWatermark() {
+ PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
+ if (this == owner->AllocationTopPage()) {
+ return owner->top();
+ }
+ return address() + AllocationWatermarkOffset();
}
-Page* PageIterator::next() {
- ASSERT(has_next());
- prev_page_ = next_page_;
- next_page_ = next_page_->next_page();
- return prev_page_;
+uint32_t Page::AllocationWatermarkOffset() {
+ return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
+ kAllocationWatermarkOffsetShift);
}
-// -----------------------------------------------------------------------------
-// NewSpacePageIterator
+void Page::SetAllocationWatermark(Address allocation_watermark) {
+ if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
+ // When iterating intergenerational references during scavenge
+ // we might decide to promote an encountered young object.
+ // We will allocate space for such an object and put it
+ // into the promotion queue to process later.
+ // If space for the object was allocated somewhere beyond the allocation
+ // watermark, this might cause garbage pointers to appear under the
+ // allocation watermark. To avoid visiting them during the dirty-regions
+ // iteration, which might still be in progress, we store a valid allocation
+ // watermark value and mark this page as having an invalid watermark.
+ SetCachedAllocationWatermark(AllocationWatermark());
+ InvalidateWatermark(true);
+ }
+
+ flags_ = (flags_ & kFlagsMask) |
+ Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
+ ASSERT(AllocationWatermarkOffset()
+ == static_cast<uint32_t>(Offset(allocation_watermark)));
+}
-NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
- : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
- next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
- last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
+void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
+ mc_first_forwarded = allocation_watermark;
+}
-NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
- : prev_page_(space->anchor()),
- next_page_(prev_page_->next_page()),
- last_page_(prev_page_->prev_page()) { }
-NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
- : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
- next_page_(NewSpacePage::FromAddress(start)),
- last_page_(NewSpacePage::FromLimit(limit)) {
- SemiSpace::AssertValidRange(start, limit);
+Address Page::CachedAllocationWatermark() {
+ return mc_first_forwarded;
}
-bool NewSpacePageIterator::has_next() {
- return prev_page_ != last_page_;
+uint32_t Page::GetRegionMarks() {
+ return dirty_regions_;
}
-NewSpacePage* NewSpacePageIterator::next() {
- ASSERT(has_next());
- prev_page_ = next_page_;
- next_page_ = next_page_->next_page();
- return prev_page_;
+void Page::SetRegionMarks(uint32_t marks) {
+ dirty_regions_ = marks;
}
-// -----------------------------------------------------------------------------
-// HeapObjectIterator
-HeapObject* HeapObjectIterator::FromCurrentPage() {
- while (cur_addr_ != cur_end_) {
- if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
- cur_addr_ = space_->limit();
- continue;
- }
- HeapObject* obj = HeapObject::FromAddress(cur_addr_);
- int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
- cur_addr_ += obj_size;
- ASSERT(cur_addr_ <= cur_end_);
- if (!obj->IsFiller()) {
- ASSERT_OBJECT_SIZE(obj_size);
- return obj;
+int Page::GetRegionNumberForAddress(Address addr) {
+ // Each page is divided into 256-byte regions. Each region has a
+ // corresponding dirty mark bit in the page header. A region can contain
+ // intergenerational references iff its dirty mark is set.
+ // A normal 8K page contains exactly 32 regions, so all region marks fit
+ // into a 32-bit integer field. To calculate a region number we just divide
+ // the offset inside the page by the region size.
+ // A large page can contain more than 32 regions. But we want to avoid
+ // additional write barrier code for distinguishing between large and normal
+ // pages, so we just ignore the fact that addr points into a large page and
+ // calculate the region number as if addr pointed into a normal 8K page.
+ // This way we get a region number modulo 32, so for large pages several
+ // regions might be mapped to a single dirty mark.
+ ASSERT_PAGE_ALIGNED(this->address());
+ STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
+
+ // We use masking with kPageAlignmentMask instead of Page::Offset() to get
+ // the offset from the beginning of the 8K page containing addr, not from
+ // the beginning of the actual page, which can be bigger than 8K.
+ intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
+ return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
+}
+
+
+uint32_t Page::GetRegionMaskForAddress(Address addr) {
+ return 1 << GetRegionNumberForAddress(addr);
+}
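
A quick, self-contained arithmetic check of the region mapping described in the comment above. The constants are assumptions chosen for illustration (8K pages, 256-byte regions) and the names are not V8's.

#include <cassert>
#include <cstdint>

static const int kToyRegionSizeLog2 = 8;                        // 256-byte regions
static const uintptr_t kToyPageAlignmentMask = (1u << 13) - 1;  // 8K pages

static int ToyRegionNumber(uintptr_t addr) {
  return static_cast<int>((addr & kToyPageAlignmentMask) >> kToyRegionSizeLog2);
}

static uint32_t ToyRegionMask(uintptr_t addr) {
  return 1u << ToyRegionNumber(addr);
}

int main() {
  // 8K / 256 = 32 regions, so every region number fits in a 32-bit mask.
  assert(ToyRegionNumber(0x2000) == 0);              // page-aligned address
  assert(ToyRegionNumber(0x2000 + 0x500) == 5);      // offset 1280 -> region 5
  assert(ToyRegionNumber(0x2000 + 8191) == 31);      // last byte of the page
  // On a large page the offset is taken modulo 8K, so distinct regions can
  // share a dirty bit -- exactly the aliasing the comment above describes.
  assert(ToyRegionNumber(0x2000 + 8192 + 0x500) == 5);
  assert(ToyRegionMask(0x2000 + 0x500) == (1u << 5));
  return 0;
}
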
+
+
+uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
+ uint32_t result = 0;
+ static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1;
+ if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) {
+ result = kAllRegionsDirtyMarks;
+ } else if (length_in_bytes > 0) {
+ int start_region = GetRegionNumberForAddress(start);
+ int end_region =
+ GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
+ uint32_t start_mask = (~0) << start_region;
+ uint32_t end_mask = ~((~1) << end_region);
+ result = start_mask & end_mask;
+ // If end_region < start_region (possible on large pages, where region
+ // numbers wrap modulo 32), the masks do not overlap and are ORed instead.
+ if (result == 0) result = start_mask | end_mask;
+ }
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ uint32_t expected = 0;
+ for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
+ expected |= GetRegionMaskForAddress(a);
}
+ ASSERT(expected == result);
}
- return NULL;
+#endif
+ return result;
}
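
A worked example of the span mask above: a span covering regions 3 through 6 is start_mask & end_mask, and when the end region wraps below the start region the intersection is empty, so the two masks are ORed. This is a standalone check, not V8 code.

#include <cassert>
#include <cstdint>

int main() {
  // Regions 3..6 dirty: start_mask keeps bits >= 3, end_mask keeps bits <= 6.
  uint32_t start_mask = ~0u << 3;           // 0xFFFFFFF8
  uint32_t end_mask   = ~(~1u << 6);        // 0x0000007F
  assert((start_mask & end_mask) == 0x78);  // bits 3, 4, 5, 6

  // Wrapped span (end_region < start_region): the AND is zero, so OR instead.
  uint32_t wrapped_start = ~0u << 30;       // bits 30, 31
  uint32_t wrapped_end   = ~(~1u << 1);     // bits 0, 1
  assert((wrapped_start & wrapped_end) == 0);
  assert((wrapped_start | wrapped_end) == 0xC0000003);
  return 0;
}
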
-// -----------------------------------------------------------------------------
-// MemoryAllocator
+void Page::MarkRegionDirty(Address address) {
+ SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
+}
-#ifdef ENABLE_HEAP_PROTECTION
-void MemoryAllocator::Protect(Address start, size_t size) {
- OS::Protect(start, size);
+bool Page::IsRegionDirty(Address address) {
+ return GetRegionMarks() & GetRegionMaskForAddress(address);
}
-void MemoryAllocator::Unprotect(Address start,
- size_t size,
- Executability executable) {
- OS::Unprotect(start, size, executable);
-}
+void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
+ int rstart = GetRegionNumberForAddress(start);
+ int rend = GetRegionNumberForAddress(end);
+
+ if (reaches_limit) {
+ end += 1;
+ }
+ if ((rend - rstart) == 0) {
+ return;
+ }
-void MemoryAllocator::ProtectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- OS::Protect(chunks_[id].address(), chunks_[id].size());
+ uint32_t bitmask = 0;
+
+ if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
+ || (start == ObjectAreaStart())) {
+ // First region is fully covered
+ bitmask = 1 << rstart;
+ }
+
+ while (++rstart < rend) {
+ bitmask |= 1 << rstart;
+ }
+
+ if (bitmask) {
+ SetRegionMarks(GetRegionMarks() & ~bitmask);
+ }
}
-void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
- chunks_[id].owner()->executable() == EXECUTABLE);
+void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
+ heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
}
-#endif
+bool Page::IsWatermarkValid() {
+ return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
+ heap_->page_watermark_invalidated_mark_;
+}
-// --------------------------------------------------------------------------
-// PagedSpace
-Page* Page::Initialize(Heap* heap,
- MemoryChunk* chunk,
- Executability executable,
- PagedSpace* owner) {
- Page* page = reinterpret_cast<Page*>(chunk);
- ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
- ASSERT(chunk->owner() == owner);
- owner->IncreaseCapacity(Page::kObjectAreaSize);
- owner->Free(page->ObjectAreaStart(),
- static_cast<int>(page->ObjectAreaEnd() -
- page->ObjectAreaStart()));
- heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+void Page::InvalidateWatermark(bool value) {
+ if (value) {
+ flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+ heap_->page_watermark_invalidated_mark_;
+ } else {
+ flags_ =
+ (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+ (heap_->page_watermark_invalidated_mark_ ^
+ (1 << WATERMARK_INVALIDATED));
+ }
- return page;
+ ASSERT(IsWatermarkValid() == !value);
}
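
A simplified model of the watermark-validity scheme implemented by SetAllocationWatermark, IsWatermarkValid and InvalidateWatermark above: each page stores one parity bit and the heap keeps a single global mark, so the meaning of that bit can be toggled for every page at once. The bit position and names below are illustrative assumptions, not the real layout.

#include <cassert>
#include <cstdint>

static const int kToyWatermarkInvalidatedBit = 3;   // assumed flag position

struct ToyHeap {
  uintptr_t invalidated_mark;                       // 0 or 1 << bit
  void FlipMeaningOfInvalidatedFlag() {
    invalidated_mark ^= 1u << kToyWatermarkInvalidatedBit;
  }
};

struct ToyPage {
  uintptr_t flags;
  bool IsWatermarkValid(const ToyHeap& heap) const {
    return (flags & (1u << kToyWatermarkInvalidatedBit)) != heap.invalidated_mark;
  }
  void InvalidateWatermark(const ToyHeap& heap) {
    flags = (flags & ~(1u << kToyWatermarkInvalidatedBit)) | heap.invalidated_mark;
  }
};

int main() {
  ToyHeap heap = { 0 };
  ToyPage page = { 0 };
  page.InvalidateWatermark(heap);        // page bit now equals the heap mark
  assert(!page.IsWatermarkValid(heap));
  // Flipping the heap-wide mark toggles the validity of every page at once,
  // without touching the pages themselves -- the reason a single bit is used.
  heap.FlipMeaningOfInvalidatedFlag();
  assert(page.IsWatermarkValid(heap));
  return 0;
}
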
-bool PagedSpace::Contains(Address addr) {
- Page* p = Page::FromAddress(addr);
- if (!p->is_valid()) return false;
- return p->owner() == this;
+bool Page::GetPageFlag(PageFlag flag) {
+ return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
}
-void MemoryChunk::set_scan_on_scavenge(bool scan) {
- if (scan) {
- if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
- SetFlag(SCAN_ON_SCAVENGE);
+void Page::SetPageFlag(PageFlag flag, bool value) {
+ if (value) {
+ flags_ |= static_cast<intptr_t>(1 << flag);
} else {
- if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
- ClearFlag(SCAN_ON_SCAVENGE);
+ flags_ &= ~static_cast<intptr_t>(1 << flag);
}
- heap_->incremental_marking()->SetOldSpacePageFlags(this);
-}
-
-
-MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
- MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
- OffsetFrom(addr) & ~Page::kPageAlignmentMask);
- if (maybe->owner() != NULL) return maybe;
- LargeObjectIterator iterator(HEAP->lo_space());
- for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
- // Fixed arrays are the only pointer-containing objects in large object
- // space.
- if (o->IsFixedArray()) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
- if (chunk->Contains(addr)) {
- return chunk;
- }
- }
+}
+
+
+void Page::ClearPageFlags() {
+ flags_ = 0;
+}
+
+
+void Page::ClearGCFields() {
+ InvalidateWatermark(true);
+ SetAllocationWatermark(ObjectAreaStart());
+ if (heap_->gc_state() == Heap::SCAVENGE) {
+ SetCachedAllocationWatermark(ObjectAreaStart());
}
- UNREACHABLE();
- return NULL;
+ SetRegionMarks(kAllRegionsCleanMarks);
}
-PointerChunkIterator::PointerChunkIterator(Heap* heap)
- : state_(kOldPointerState),
- old_pointer_iterator_(heap->old_pointer_space()),
- map_iterator_(heap->map_space()),
- lo_iterator_(heap->lo_space()) { }
+bool Page::WasInUseBeforeMC() {
+ return GetPageFlag(WAS_IN_USE_BEFORE_MC);
+}
-Page* Page::next_page() {
- ASSERT(next_chunk()->owner() == owner());
- return static_cast<Page*>(next_chunk());
+void Page::SetWasInUseBeforeMC(bool was_in_use) {
+ SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
}
-Page* Page::prev_page() {
- ASSERT(prev_chunk()->owner() == owner());
- return static_cast<Page*>(prev_chunk());
+bool Page::IsLargeObjectPage() {
+ return !GetPageFlag(IS_NORMAL_PAGE);
}
-void Page::set_next_page(Page* page) {
- ASSERT(page->owner() == owner());
- set_next_chunk(page);
+void Page::SetIsLargeObjectPage(bool is_large_object_page) {
+ SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}
+Executability Page::PageExecutability() {
+ return GetPageFlag(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+}
+
+
+void Page::SetPageExecutability(Executability executable) {
+ SetPageFlag(IS_EXECUTABLE, executable == EXECUTABLE);
+}
+
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+
+void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
+ address_ = a;
+ size_ = s;
+ owner_ = o;
+ executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
+ owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
+}
+
+
+bool MemoryAllocator::IsValidChunk(int chunk_id) {
+ if (!IsValidChunkId(chunk_id)) return false;
+
+ ChunkInfo& c = chunks_[chunk_id];
+ return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
+}
+
+
+bool MemoryAllocator::IsValidChunkId(int chunk_id) {
+ return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
+}
+
+
+bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
+ ASSERT(p->is_valid());
+
+ int chunk_id = GetChunkId(p);
+ if (!IsValidChunkId(chunk_id)) return false;
+
+ ChunkInfo& c = chunks_[chunk_id];
+ return (c.address() <= p->address()) &&
+ (p->address() < c.address() + c.size()) &&
+ (space == c.owner());
+}
+
+
+Page* MemoryAllocator::GetNextPage(Page* p) {
+ ASSERT(p->is_valid());
+ intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
+ return Page::FromAddress(AddressFrom<Address>(raw_addr));
+}
+
+
+int MemoryAllocator::GetChunkId(Page* p) {
+ ASSERT(p->is_valid());
+ return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
+}
+
+
+void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
+ ASSERT(prev->is_valid());
+ int chunk_id = GetChunkId(prev);
+ ASSERT_PAGE_ALIGNED(next->address());
+ prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
+}
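
GetNextPage, GetChunkId and SetNextPage above pack two values into one word: the page-aligned address of the next page in the high bits and the chunk id in the low (page-offset) bits. A small self-contained sketch of that encoding, assuming 8K page alignment and using hypothetical names:

#include <cassert>
#include <cstdint>

static const uintptr_t kToyPageAlignmentMask = (1u << 13) - 1;  // 8K pages

static uintptr_t PackHeader(uintptr_t next_page_addr, int chunk_id) {
  // next_page_addr must be page aligned so its low bits are free for the id.
  assert((next_page_addr & kToyPageAlignmentMask) == 0);
  assert(static_cast<uintptr_t>(chunk_id) <= kToyPageAlignmentMask);
  return next_page_addr | static_cast<uintptr_t>(chunk_id);
}

static uintptr_t NextPageAddress(uintptr_t header) {
  return header & ~kToyPageAlignmentMask;
}

static int ChunkId(uintptr_t header) {
  return static_cast<int>(header & kToyPageAlignmentMask);
}

int main() {
  uintptr_t header = PackHeader(0x40000, 42);   // 0x40000 is 8K aligned
  assert(NextPageAddress(header) == 0x40000);
  assert(ChunkId(header) == 42);
  return 0;
}
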
+
+
+PagedSpace* MemoryAllocator::PageOwner(Page* page) {
+ int chunk_id = GetChunkId(page);
+ ASSERT(IsValidChunk(chunk_id));
+ return chunks_[chunk_id].owner();
+}
+
+
+bool MemoryAllocator::InInitialChunk(Address address) {
+ if (initial_chunk_ == NULL) return false;
-void Page::set_prev_page(Page* page) {
- ASSERT(page->owner() == owner());
- set_prev_chunk(page);
+ Address start = static_cast<Address>(initial_chunk_->address());
+ return (start <= address) && (address < start + initial_chunk_->size());
+}
+
+
+// --------------------------------------------------------------------------
+// PagedSpace
+
+bool PagedSpace::Contains(Address addr) {
+ Page* p = Page::FromAddress(addr);
+ if (!p->is_valid()) return false;
+ return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
}
@@ -251,14 +393,15 @@ void Page::set_prev_page(Page* page) {
// not contain slow case logic (eg, move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top;
+HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
+ int size_in_bytes) {
+ Address current_top = alloc_info->top;
Address new_top = current_top + size_in_bytes;
- if (new_top > allocation_info_.limit) return NULL;
+ if (new_top > alloc_info->limit) return NULL;
- allocation_info_.top = new_top;
- ASSERT(allocation_info_.VerifyPagedAllocation());
- ASSERT(current_top != NULL);
+ alloc_info->top = new_top;
+ ASSERT(alloc_info->VerifyPagedAllocation());
+ accounting_stats_.AllocateBytes(size_in_bytes);
return HeapObject::FromAddress(current_top);
}
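
A minimal bump-pointer allocator in the spirit of AllocateLinearly above: only the fast path, returning NULL when the linear area is exhausted so the caller can fall back to a slow path. The names are illustrative, not V8's.

#include <cstddef>

struct ToyAllocationInfo {
  char* top;    // next free byte
  char* limit;  // end of the current linear allocation area
};

// Fast path only: bumps top by size_in_bytes and returns the old top, or
// NULL if the request does not fit, leaving slow-path handling (next page,
// free list, GC) entirely to the caller.
static void* ToyAllocateLinearly(ToyAllocationInfo* info, size_t size_in_bytes) {
  char* current_top = info->top;
  char* new_top = current_top + size_in_bytes;
  if (new_top > info->limit) return NULL;
  info->top = new_top;
  return current_top;
}
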
@@ -267,78 +410,54 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
ASSERT(HasBeenSetup());
ASSERT_OBJECT_SIZE(size_in_bytes);
- HeapObject* object = AllocateLinearly(size_in_bytes);
- if (object != NULL) {
- if (identity() == CODE_SPACE) {
- SkipList::Update(object->address(), size_in_bytes);
- }
- return object;
- }
-
- object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) {
- if (identity() == CODE_SPACE) {
- SkipList::Update(object->address(), size_in_bytes);
- }
- return object;
- }
+ HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
+ if (object != NULL) return object;
object = SlowAllocateRaw(size_in_bytes);
- if (object != NULL) {
- if (identity() == CODE_SPACE) {
- SkipList::Update(object->address(), size_in_bytes);
- }
- return object;
- }
+ if (object != NULL) return object;
return Failure::RetryAfterGC(identity());
}
-// -----------------------------------------------------------------------------
-// NewSpace
-MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes) {
- Address old_top = allocation_info_.top;
- if (allocation_info_.limit - old_top < size_in_bytes) {
- Address new_top = old_top + size_in_bytes;
- Address high = to_space_.page_high();
- if (allocation_info_.limit < high) {
- // Incremental marking has lowered the limit to get a
- // chance to do a step.
- allocation_info_.limit = Min(
- allocation_info_.limit + inline_allocation_limit_step_,
- high);
- int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
- top_on_previous_step_ = new_top;
- return AllocateRawInternal(size_in_bytes);
- } else if (AddFreshPage()) {
- // Switched to new page. Try allocating again.
- int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
- top_on_previous_step_ = to_space_.page_low();
- return AllocateRawInternal(size_in_bytes);
- } else {
- return Failure::RetryAfterGC();
- }
- }
+// Reallocating (and promoting) objects during a compacting collection.
+MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
+ ASSERT(HasBeenSetup());
+ ASSERT_OBJECT_SIZE(size_in_bytes);
+ HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
+ if (object != NULL) return object;
- Object* obj = HeapObject::FromAddress(allocation_info_.top);
- allocation_info_.top += size_in_bytes;
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+ object = SlowMCAllocateRaw(size_in_bytes);
+ if (object != NULL) return object;
- return obj;
+ return Failure::RetryAfterGC(identity());
}
-LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
- heap->incremental_marking()->SetOldSpacePageFlags(chunk);
- return static_cast<LargePage*>(chunk);
+// -----------------------------------------------------------------------------
+// NewSpace
+
+MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
+ AllocationInfo* alloc_info) {
+ Address new_top = alloc_info->top + size_in_bytes;
+ if (new_top > alloc_info->limit) return Failure::RetryAfterGC();
+
+ Object* obj = HeapObject::FromAddress(alloc_info->top);
+ alloc_info->top = new_top;
+#ifdef DEBUG
+ SemiSpace* space =
+ (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
+ ASSERT(space->low() <= alloc_info->top
+ && alloc_info->top <= space->high()
+ && alloc_info->limit == space->high());
+#endif
+ return obj;
}
intptr_t LargeObjectSpace::Available() {
- return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
+ return LargeObjectChunk::ObjectSizeFor(
+ heap()->isolate()->memory_allocator()->Available());
}
@@ -348,23 +467,16 @@ void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
ASSERT(string->IsSeqString());
ASSERT(string->address() + StringType::SizeFor(string->length()) ==
allocation_info_.top);
- Address old_top = allocation_info_.top;
allocation_info_.top =
string->address() + StringType::SizeFor(length);
string->set_length(length);
- if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
- int delta = static_cast<int>(old_top - allocation_info_.top);
- MemoryChunk::IncrementLiveBytes(string->address(), -delta);
- }
}
bool FreeListNode::IsFreeListNode(HeapObject* object) {
- Map* map = object->map();
- Heap* heap = object->GetHeap();
- return map == heap->raw_unchecked_free_space_map()
- || map == heap->raw_unchecked_one_pointer_filler_map()
- || map == heap->raw_unchecked_two_pointer_filler_map();
+ return object->map() == HEAP->raw_unchecked_byte_array_map()
+ || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
+ || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
}
} } // namespace v8::internal
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 61b318118a..97c6d2ac19 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -35,66 +35,52 @@
namespace v8 {
namespace internal {
+// For contiguous spaces, top should be in the space (or at the end) and limit
+// should be the end of the space.
+#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
+ ASSERT((space).low() <= (info).top \
+ && (info).top <= (space).high() \
+ && (info).limit == (space).high())
// ----------------------------------------------------------------------------
// HeapObjectIterator
HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
- // You can't actually iterate over the anchor page. It is not a real page,
- // just an anchor for the double linked page list. Initialize as if we have
- // reached the end of the anchor page, then the first iteration will move on
- // to the first page.
- Initialize(space,
- NULL,
- NULL,
- kAllPagesInSpace,
- NULL);
+ Initialize(space->bottom(), space->top(), NULL);
}
HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
HeapObjectCallback size_func) {
- // You can't actually iterate over the anchor page. It is not a real page,
- // just an anchor for the double linked page list. Initialize the current
- // address and end as NULL, then the first iteration will move on
- // to the first page.
- Initialize(space,
- NULL,
- NULL,
- kAllPagesInSpace,
- size_func);
+ Initialize(space->bottom(), space->top(), size_func);
+}
+
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
+ Initialize(start, space->top(), NULL);
+}
+
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
+ HeapObjectCallback size_func) {
+ Initialize(start, space->top(), size_func);
}
HeapObjectIterator::HeapObjectIterator(Page* page,
HeapObjectCallback size_func) {
- Space* owner = page->owner();
- ASSERT(owner == HEAP->old_pointer_space() ||
- owner == HEAP->old_data_space() ||
- owner == HEAP->map_space() ||
- owner == HEAP->cell_space() ||
- owner == HEAP->code_space());
- Initialize(reinterpret_cast<PagedSpace*>(owner),
- page->ObjectAreaStart(),
- page->ObjectAreaEnd(),
- kOnePageOnly,
- size_func);
- ASSERT(page->WasSweptPrecisely());
-}
-
-
-void HeapObjectIterator::Initialize(PagedSpace* space,
- Address cur, Address end,
- HeapObjectIterator::PageMode mode,
- HeapObjectCallback size_f) {
- // Check that we actually can iterate this space.
- ASSERT(!space->was_swept_conservatively());
+ Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
+}
- space_ = space;
+
+void HeapObjectIterator::Initialize(Address cur, Address end,
+ HeapObjectCallback size_f) {
cur_addr_ = cur;
- cur_end_ = end;
- page_mode_ = mode;
+ end_addr_ = end;
+ end_page_ = Page::FromAllocationTop(end);
size_func_ = size_f;
+ Page* p = Page::FromAllocationTop(cur_addr_);
+ cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
#ifdef DEBUG
Verify();
@@ -102,35 +88,63 @@ void HeapObjectIterator::Initialize(PagedSpace* space,
}
-// We have hit the end of the page and should advance to the next block of
-// objects. This happens at the end of the page.
-bool HeapObjectIterator::AdvanceToNextPage() {
- ASSERT(cur_addr_ == cur_end_);
- if (page_mode_ == kOnePageOnly) return false;
- Page* cur_page;
- if (cur_addr_ == NULL) {
- cur_page = space_->anchor();
- } else {
- cur_page = Page::FromAddress(cur_addr_ - 1);
- ASSERT(cur_addr_ == cur_page->ObjectAreaEnd());
- }
+HeapObject* HeapObjectIterator::FromNextPage() {
+ if (cur_addr_ == end_addr_) return NULL;
+
+ Page* cur_page = Page::FromAllocationTop(cur_addr_);
cur_page = cur_page->next_page();
- if (cur_page == space_->anchor()) return false;
+ ASSERT(cur_page->is_valid());
+
cur_addr_ = cur_page->ObjectAreaStart();
- cur_end_ = cur_page->ObjectAreaEnd();
- ASSERT(cur_page->WasSweptPrecisely());
- return true;
+ cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
+
+ if (cur_addr_ == end_addr_) return NULL;
+ ASSERT(cur_addr_ < cur_limit_);
+#ifdef DEBUG
+ Verify();
+#endif
+ return FromCurrentPage();
}
#ifdef DEBUG
void HeapObjectIterator::Verify() {
- // TODO(gc): We should do something here.
+ Page* p = Page::FromAllocationTop(cur_addr_);
+ ASSERT(p == Page::FromAllocationTop(cur_limit_));
+ ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif
// -----------------------------------------------------------------------------
+// PageIterator
+
+PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
+ prev_page_ = NULL;
+ switch (mode) {
+ case PAGES_IN_USE:
+ stop_page_ = space->AllocationTopPage();
+ break;
+ case PAGES_USED_BY_MC:
+ stop_page_ = space->MCRelocationTopPage();
+ break;
+ case ALL_PAGES:
+#ifdef DEBUG
+ // Verify that the cached last page in the space is actually the
+ // last page.
+ for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
+ if (!p->next_page()->is_valid()) {
+ ASSERT(space->last_page_ == p);
+ }
+ }
+#endif
+ stop_page_ = space->last_page_;
+ break;
+ }
+}
+
+
+// -----------------------------------------------------------------------------
// CodeRange
@@ -157,12 +171,7 @@ bool CodeRange::Setup(const size_t requested) {
// We are sure that we have mapped a block of requested addresses.
ASSERT(code_range_->size() == requested);
LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
- Address base = reinterpret_cast<Address>(code_range_->address());
- Address aligned_base =
- RoundUp(reinterpret_cast<Address>(code_range_->address()),
- MemoryChunk::kAlignment);
- size_t size = code_range_->size() - (aligned_base - base);
- allocation_list_.Add(FreeBlock(aligned_base, size));
+ allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
current_allocation_block_index_ = 0;
return true;
}
@@ -219,8 +228,7 @@ void CodeRange::GetNextAllocationBlock(size_t requested) {
-Address CodeRange::AllocateRawMemory(const size_t requested,
- size_t* allocated) {
+void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
ASSERT(current_allocation_block_index_ < allocation_list_.length());
if (requested > allocation_list_[current_allocation_block_index_].size) {
// Find an allocation block large enough. This function call may
@@ -228,16 +236,13 @@ Address CodeRange::AllocateRawMemory(const size_t requested,
GetNextAllocationBlock(requested);
}
// Commit the requested memory at the start of the current allocation block.
- size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
+ *allocated = RoundUp(requested, Page::kPageSize);
FreeBlock current = allocation_list_[current_allocation_block_index_];
- if (aligned_requested >= (current.size - Page::kPageSize)) {
+ if (*allocated >= current.size - Page::kPageSize) {
// Don't leave a small free block, useless for a large object or chunk.
*allocated = current.size;
- } else {
- *allocated = aligned_requested;
}
ASSERT(*allocated <= current.size);
- ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
if (!code_range_->Commit(current.start, *allocated, true)) {
*allocated = 0;
return NULL;
@@ -251,8 +256,7 @@ Address CodeRange::AllocateRawMemory(const size_t requested,
}
-void CodeRange::FreeRawMemory(Address address, size_t length) {
- ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
+void CodeRange::FreeRawMemory(void* address, size_t length) {
free_list_.Add(FreeBlock(address, length));
code_range_->Uncommit(address, length);
}
@@ -270,12 +274,35 @@ void CodeRange::TearDown() {
// MemoryAllocator
//
+// 270 is an estimate based on the static default heap size of a pair of 256K
+// semispaces and a 64M old generation.
+const int kEstimatedNumberOfChunks = 270;
+
+
MemoryAllocator::MemoryAllocator(Isolate* isolate)
: isolate_(isolate),
capacity_(0),
capacity_executable_(0),
size_(0),
- size_executable_(0) {
+ size_executable_(0),
+ initial_chunk_(NULL),
+ chunks_(kEstimatedNumberOfChunks),
+ free_chunk_ids_(kEstimatedNumberOfChunks),
+ max_nof_chunks_(0),
+ top_(0) {
+}
+
+
+void MemoryAllocator::Push(int free_chunk_id) {
+ ASSERT(max_nof_chunks_ > 0);
+ ASSERT(top_ < max_nof_chunks_);
+ free_chunk_ids_[top_++] = free_chunk_id;
+}
+
+
+int MemoryAllocator::Pop() {
+ ASSERT(top_ > 0);
+ return free_chunk_ids_[--top_];
}
@@ -284,303 +311,269 @@ bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
ASSERT_GE(capacity_, capacity_executable_);
+ // Over-estimate the size of the chunks_ array. The estimate assumes that
+ // old space always expands in units of a chunk (kChunkSize), except for
+ // the last expansion.
+ //
+ // Due to alignment, the allocated space might be one page less than the
+ // required number (kPagesPerChunk) of pages for old spaces.
+ //
+ // Reserve two chunk ids for the semispaces, one for the map space, one for
+ // old space, and one for code space.
+ max_nof_chunks_ =
+ static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
+ if (max_nof_chunks_ > kMaxNofChunks) return false;
+
size_ = 0;
size_executable_ = 0;
-
+ ChunkInfo info; // uninitialized element.
+ for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
+ chunks_.Add(info);
+ free_chunk_ids_.Add(i);
+ }
+ top_ = max_nof_chunks_;
return true;
}
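
For concreteness, a back-of-the-envelope instance of the estimate above under assumed values (8K pages and 128K chunks, i.e. 16 pages per chunk); the real constants live elsewhere in the source, so treat this only as a sanity check of the formula.

#include <cassert>

int main() {
  // Assumed values for illustration: 8K pages, 128K chunks (16 pages each).
  const long kPageSize  = 8 * 1024;
  const long kChunkSize = 16 * kPageSize;
  const long capacity   = 64L * 1024 * 1024;   // 64 MB of old-generation capacity
  // Worst case each chunk loses one page to alignment, hence the divisor.
  int max_nof_chunks = static_cast<int>(capacity / (kChunkSize - kPageSize)) + 5;
  assert(max_nof_chunks == 551);               // 546 usable chunks + 5 reserved ids
  return 0;
}
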
void MemoryAllocator::TearDown() {
- // Check that spaces were torn down before MemoryAllocator.
- ASSERT(size_ == 0);
- // TODO(gc) this will be true again when we fix FreeMemory.
- // ASSERT(size_executable_ == 0);
+ for (int i = 0; i < max_nof_chunks_; i++) {
+ if (chunks_[i].address() != NULL) DeleteChunk(i);
+ }
+ chunks_.Clear();
+ free_chunk_ids_.Clear();
+
+ if (initial_chunk_ != NULL) {
+ LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
+ delete initial_chunk_;
+ initial_chunk_ = NULL;
+ }
+
+ ASSERT(top_ == max_nof_chunks_); // all chunks are free
+ top_ = 0;
capacity_ = 0;
capacity_executable_ = 0;
+ size_ = 0;
+ max_nof_chunks_ = 0;
}
-void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
- Executability executable) {
- // TODO(gc) make code_range part of memory allocator?
- ASSERT(reservation->IsReserved());
- size_t size = reservation->size();
- ASSERT(size_ >= size);
- size_ -= size;
-
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
- if (executable == EXECUTABLE) {
- ASSERT(size_executable_ >= size);
- size_executable_ -= size;
+void* MemoryAllocator::AllocateRawMemory(const size_t requested,
+ size_t* allocated,
+ Executability executable) {
+ if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
+ return NULL;
}
- // Code which is part of the code-range does not have its own VirtualMemory.
- ASSERT(!isolate_->code_range()->contains(
- static_cast<Address>(reservation->address())));
- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
- reservation->Release();
-}
-
-
-void MemoryAllocator::FreeMemory(Address base,
- size_t size,
- Executability executable) {
- // TODO(gc) make code_range part of memory allocator?
- ASSERT(size_ >= size);
- size_ -= size;
-
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+ void* mem;
if (executable == EXECUTABLE) {
- ASSERT(size_executable_ >= size);
- size_executable_ -= size;
- }
- if (isolate_->code_range()->contains(static_cast<Address>(base))) {
- ASSERT(executable == EXECUTABLE);
- isolate_->code_range()->FreeRawMemory(base, size);
+ // Check executable memory limit.
+ if (size_executable_ + requested >
+ static_cast<size_t>(capacity_executable_)) {
+ LOG(isolate_,
+ StringEvent("MemoryAllocator::AllocateRawMemory",
+ "V8 Executable Allocation capacity exceeded"));
+ return NULL;
+ }
+ // Allocate executable memory either from code range or from the
+ // OS.
+ if (isolate_->code_range()->exists()) {
+ mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
+ } else {
+ mem = OS::Allocate(requested, allocated, true);
+ }
+ // Update executable memory size.
+ size_executable_ += static_cast<int>(*allocated);
} else {
- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
- bool result = VirtualMemory::ReleaseRegion(base, size);
- USE(result);
- ASSERT(result);
+ mem = OS::Allocate(requested, allocated, false);
}
+ int alloced = static_cast<int>(*allocated);
+ size_ += alloced;
+
+#ifdef DEBUG
+ ZapBlock(reinterpret_cast<Address>(mem), alloced);
+#endif
+ isolate_->counters()->memory_allocated()->Increment(alloced);
+ return mem;
}
-Address MemoryAllocator::ReserveAlignedMemory(size_t size,
- size_t alignment,
- VirtualMemory* controller) {
- VirtualMemory reservation(size, alignment);
+void MemoryAllocator::FreeRawMemory(void* mem,
+ size_t length,
+ Executability executable) {
+#ifdef DEBUG
+ // Do not try to zap the guard page.
+ size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+ ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
+#endif
+ if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
+ isolate_->code_range()->FreeRawMemory(mem, length);
+ } else {
+ OS::Free(mem, length);
+ }
+ isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
+ size_ -= static_cast<int>(length);
+ if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
- if (!reservation.IsReserved()) return NULL;
- size_ += reservation.size();
- Address base = RoundUp(static_cast<Address>(reservation.address()),
- alignment);
- controller->TakeControl(&reservation);
- return base;
+ ASSERT(size_ >= 0);
+ ASSERT(size_executable_ >= 0);
}
-Address MemoryAllocator::AllocateAlignedMemory(size_t size,
- size_t alignment,
- Executability executable,
- VirtualMemory* controller) {
- VirtualMemory reservation;
- Address base = ReserveAlignedMemory(size, alignment, &reservation);
- if (base == NULL) return NULL;
- if (!reservation.Commit(base,
- size,
- executable == EXECUTABLE)) {
- return NULL;
+void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
+ AllocationAction action,
+ size_t size) {
+ for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+ MemoryAllocationCallbackRegistration registration =
+ memory_allocation_callbacks_[i];
+ if ((registration.space & space) == space &&
+ (registration.action & action) == action)
+ registration.callback(space, action, static_cast<int>(size));
}
- controller->TakeControl(&reservation);
- return base;
}
-void Page::InitializeAsAnchor(PagedSpace* owner) {
- set_owner(owner);
- set_prev_page(this);
- set_next_page(this);
+bool MemoryAllocator::MemoryAllocationCallbackRegistered(
+ MemoryAllocationCallback callback) {
+ for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+ if (memory_allocation_callbacks_[i].callback == callback) return true;
+ }
+ return false;
}
-NewSpacePage* NewSpacePage::Initialize(Heap* heap,
- Address start,
- SemiSpace* semi_space) {
- MemoryChunk* chunk = MemoryChunk::Initialize(heap,
- start,
- Page::kPageSize,
- NOT_EXECUTABLE,
- semi_space);
- chunk->set_next_chunk(NULL);
- chunk->set_prev_chunk(NULL);
- chunk->initialize_scan_on_scavenge(true);
- bool in_to_space = (semi_space->id() != kFromSpace);
- chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
- : MemoryChunk::IN_FROM_SPACE);
- ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
- : MemoryChunk::IN_TO_SPACE));
- NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
- heap->incremental_marking()->SetNewSpacePageFlags(page);
- return page;
+void MemoryAllocator::AddMemoryAllocationCallback(
+ MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action) {
+ ASSERT(callback != NULL);
+ MemoryAllocationCallbackRegistration registration(callback, space, action);
+ ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
+ return memory_allocation_callbacks_.Add(registration);
}
-void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
- set_owner(semi_space);
- set_next_chunk(this);
- set_prev_chunk(this);
- // Flags marks this invalid page as not being in new-space.
- // All real new-space pages will be in new-space.
- SetFlags(0, ~0);
+void MemoryAllocator::RemoveMemoryAllocationCallback(
+ MemoryAllocationCallback callback) {
+ ASSERT(callback != NULL);
+ for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+ if (memory_allocation_callbacks_[i].callback == callback) {
+ memory_allocation_callbacks_.Remove(i);
+ return;
+ }
+ }
+ UNREACHABLE();
}
+void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
+ ASSERT(initial_chunk_ == NULL);
-MemoryChunk* MemoryChunk::Initialize(Heap* heap,
- Address base,
- size_t size,
- Executability executable,
- Space* owner) {
- MemoryChunk* chunk = FromAddress(base);
-
- ASSERT(base == chunk->address());
-
- chunk->heap_ = heap;
- chunk->size_ = size;
- chunk->flags_ = 0;
- chunk->set_owner(owner);
- chunk->InitializeReservedMemory();
- chunk->slots_buffer_ = NULL;
- chunk->skip_list_ = NULL;
- chunk->ResetLiveBytes();
- Bitmap::Clear(chunk);
- chunk->initialize_scan_on_scavenge(false);
- chunk->SetFlag(WAS_SWEPT_PRECISELY);
-
- ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
- ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
-
- if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
-
- if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA);
+ initial_chunk_ = new VirtualMemory(requested);
+ CHECK(initial_chunk_ != NULL);
+ if (!initial_chunk_->IsReserved()) {
+ delete initial_chunk_;
+ initial_chunk_ = NULL;
+ return NULL;
+ }
- return chunk;
+ // We are sure that we have mapped a block of requested addresses.
+ ASSERT(initial_chunk_->size() == requested);
+ LOG(isolate_,
+ NewEvent("InitialChunk", initial_chunk_->address(), requested));
+ size_ += static_cast<int>(requested);
+ return initial_chunk_->address();
}
-void MemoryChunk::InsertAfter(MemoryChunk* other) {
- next_chunk_ = other->next_chunk_;
- prev_chunk_ = other;
- other->next_chunk_->prev_chunk_ = this;
- other->next_chunk_ = this;
+static int PagesInChunk(Address start, size_t size) {
+ // The first page starts on the first page-aligned address from start onward
+ // and the last page ends on the last page-aligned address before
+ // start+size. Page::kPageSize is a power of two so we can divide by
+ // shifting.
+ return static_cast<int>((RoundDown(start + size, Page::kPageSize)
+ - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}
-void MemoryChunk::Unlink() {
- if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
- heap_->decrement_scan_on_scavenge_pages();
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- next_chunk_->prev_chunk_ = prev_chunk_;
- prev_chunk_->next_chunk_ = next_chunk_;
- prev_chunk_ = NULL;
- next_chunk_ = NULL;
-}
+Page* MemoryAllocator::AllocatePages(int requested_pages,
+ int* allocated_pages,
+ PagedSpace* owner) {
+ if (requested_pages <= 0) return Page::FromAddress(NULL);
+ size_t chunk_size = requested_pages * Page::kPageSize;
+ void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
+ if (chunk == NULL) return Page::FromAddress(NULL);
+ LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
- Executability executable,
- Space* owner) {
- size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
- Heap* heap = isolate_->heap();
- Address base = NULL;
- VirtualMemory reservation;
- if (executable == EXECUTABLE) {
- // Check executable memory limit.
- if (size_executable_ + chunk_size > capacity_executable_) {
- LOG(isolate_,
- StringEvent("MemoryAllocator::AllocateRawMemory",
- "V8 Executable Allocation capacity exceeded"));
- return NULL;
- }
+ *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
- // Allocate executable memory either from code range or from the
- // OS.
- if (isolate_->code_range()->exists()) {
- base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
- ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
- MemoryChunk::kAlignment));
- if (base == NULL) return NULL;
- size_ += chunk_size;
- // Update executable memory size.
- size_executable_ += chunk_size;
- } else {
- base = AllocateAlignedMemory(chunk_size,
- MemoryChunk::kAlignment,
- executable,
- &reservation);
- if (base == NULL) return NULL;
- // Update executable memory size.
- size_executable_ += reservation.size();
- }
- } else {
- base = AllocateAlignedMemory(chunk_size,
- MemoryChunk::kAlignment,
- executable,
- &reservation);
+ // We may 'lose' a page due to alignment.
+ ASSERT(*allocated_pages >= kPagesPerChunk - 1);
- if (base == NULL) return NULL;
- }
-
-#ifdef DEBUG
- ZapBlock(base, chunk_size);
-#endif
- isolate_->counters()->memory_allocated()->
- Increment(static_cast<int>(chunk_size));
+ size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
- LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
- if (owner != NULL) {
- ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
- PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
+ // Check that we got at least one page that we can use.
+ if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
+ FreeRawMemory(chunk,
+ chunk_size,
+ owner->executable());
+ LOG(isolate_, DeleteEvent("PagedChunk", chunk));
+ return Page::FromAddress(NULL);
}
- MemoryChunk* result = MemoryChunk::Initialize(heap,
- base,
- chunk_size,
- executable,
- owner);
- result->set_reserved_memory(&reservation);
- return result;
-}
-
-
-Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
- Executability executable) {
- MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
-
- if (chunk == NULL) return NULL;
+ if (guard_size != 0) {
+ OS::Guard(chunk, guard_size);
+ chunk_size -= guard_size;
+ chunk = static_cast<Address>(chunk) + guard_size;
+ --*allocated_pages;
+ }
- return Page::Initialize(isolate_->heap(), chunk, executable, owner);
-}
+ int chunk_id = Pop();
+ chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
+ ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+ PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
+ Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
- Executability executable,
- Space* owner) {
- MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
- if (chunk == NULL) return NULL;
- return LargePage::Initialize(isolate_->heap(), chunk);
+ return new_pages;
}
-void MemoryAllocator::Free(MemoryChunk* chunk) {
- LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
- if (chunk->owner() != NULL) {
- ObjectSpace space =
- static_cast<ObjectSpace>(1 << chunk->owner()->identity());
- PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
+Page* MemoryAllocator::CommitPages(Address start, size_t size,
+ PagedSpace* owner, int* num_pages) {
+ ASSERT(start != NULL);
+ *num_pages = PagesInChunk(start, size);
+ ASSERT(*num_pages > 0);
+ ASSERT(initial_chunk_ != NULL);
+ ASSERT(InInitialChunk(start));
+ ASSERT(InInitialChunk(start + size - 1));
+ if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
+ return Page::FromAddress(NULL);
}
+#ifdef DEBUG
+ ZapBlock(start, size);
+#endif
+ isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
- delete chunk->slots_buffer();
- delete chunk->skip_list();
-
- VirtualMemory* reservation = chunk->reserved_memory();
- if (reservation->IsReserved()) {
- FreeMemory(reservation, chunk->executable());
- } else {
- FreeMemory(chunk->address(),
- chunk->size(),
- chunk->executable());
- }
+ // So long as we correctly overestimated the number of chunks we should not
+ // run out of chunk ids.
+ CHECK(!OutOfChunkIds());
+ int chunk_id = Pop();
+ chunks_[chunk_id].init(start, size, owner);
+ return InitializePagesInChunk(chunk_id, *num_pages, owner);
}
bool MemoryAllocator::CommitBlock(Address start,
size_t size,
Executability executable) {
- if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
+ ASSERT(start != NULL);
+ ASSERT(size > 0);
+ ASSERT(initial_chunk_ != NULL);
+ ASSERT(InInitialChunk(start));
+ ASSERT(InInitialChunk(start + size - 1));
+
+ if (!initial_chunk_->Commit(start, size, executable)) return false;
#ifdef DEBUG
ZapBlock(start, size);
#endif
@@ -590,7 +583,13 @@ bool MemoryAllocator::CommitBlock(Address start,
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- if (!VirtualMemory::UncommitRegion(start, size)) return false;
+ ASSERT(start != NULL);
+ ASSERT(size > 0);
+ ASSERT(initial_chunk_ != NULL);
+ ASSERT(InInitialChunk(start));
+ ASSERT(InInitialChunk(start + size - 1));
+
+ if (!initial_chunk_->Uncommit(start, size)) return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
@@ -603,49 +602,130 @@ void MemoryAllocator::ZapBlock(Address start, size_t size) {
}
-void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- MemoryAllocationCallbackRegistration registration =
- memory_allocation_callbacks_[i];
- if ((registration.space & space) == space &&
- (registration.action & action) == action)
- registration.callback(space, action, static_cast<int>(size));
+Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+ PagedSpace* owner) {
+ ASSERT(IsValidChunk(chunk_id));
+ ASSERT(pages_in_chunk > 0);
+
+ Address chunk_start = chunks_[chunk_id].address();
+
+ Address low = RoundUp(chunk_start, Page::kPageSize);
+
+#ifdef DEBUG
+ size_t chunk_size = chunks_[chunk_id].size();
+ Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
+ ASSERT(pages_in_chunk <=
+ ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
+#endif
+
+ Address page_addr = low;
+ for (int i = 0; i < pages_in_chunk; i++) {
+ Page* p = Page::FromAddress(page_addr);
+ p->heap_ = owner->heap();
+ p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+ p->InvalidateWatermark(true);
+ p->SetIsLargeObjectPage(false);
+ p->SetAllocationWatermark(p->ObjectAreaStart());
+ p->SetCachedAllocationWatermark(p->ObjectAreaStart());
+ page_addr += Page::kPageSize;
}
+
+ // Set the next page of the last page to 0.
+ Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
+ last_page->opaque_header = OffsetFrom(0) | chunk_id;
+
+ return Page::FromAddress(low);
}
-bool MemoryAllocator::MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) return true;
+Page* MemoryAllocator::FreePages(Page* p) {
+ if (!p->is_valid()) return p;
+
+ // Find the first page in the same chunk as 'p'
+ Page* first_page = FindFirstPageInSameChunk(p);
+ Page* page_to_return = Page::FromAddress(NULL);
+
+ if (p != first_page) {
+ // Find the last page in the same chunk as 'p'.
+ Page* last_page = FindLastPageInSameChunk(p);
+ first_page = GetNextPage(last_page); // first page in next chunk
+
+ // set the next_page of last_page to NULL
+ SetNextPage(last_page, Page::FromAddress(NULL));
+ page_to_return = p; // return 'p' when exiting
}
- return false;
-}
+ while (first_page->is_valid()) {
+ int chunk_id = GetChunkId(first_page);
+ ASSERT(IsValidChunk(chunk_id));
-void MemoryAllocator::AddMemoryAllocationCallback(
- MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action) {
- ASSERT(callback != NULL);
- MemoryAllocationCallbackRegistration registration(callback, space, action);
- ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
- return memory_allocation_callbacks_.Add(registration);
+ // Find the first page of the next chunk before deleting this chunk.
+ first_page = GetNextPage(FindLastPageInSameChunk(first_page));
+
+ // Free the current chunk.
+ DeleteChunk(chunk_id);
+ }
+
+ return page_to_return;
}
-void MemoryAllocator::RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback) {
- ASSERT(callback != NULL);
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) {
- memory_allocation_callbacks_.Remove(i);
- return;
+void MemoryAllocator::FreeAllPages(PagedSpace* space) {
+ for (int i = 0, length = chunks_.length(); i < length; i++) {
+ if (chunks_[i].owner() == space) {
+ DeleteChunk(i);
}
}
- UNREACHABLE();
+}
+
+
+void MemoryAllocator::DeleteChunk(int chunk_id) {
+ ASSERT(IsValidChunk(chunk_id));
+
+ ChunkInfo& c = chunks_[chunk_id];
+
+ // We cannot free a chunk contained in the initial chunk because it was not
+ // allocated with AllocateRawMemory. Instead we uncommit the virtual
+ // memory.
+ if (InInitialChunk(c.address())) {
+ // TODO(1240712): VirtualMemory::Uncommit has a return value which
+ // is ignored here.
+ initial_chunk_->Uncommit(c.address(), c.size());
+ Counters* counters = isolate_->counters();
+ counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
+ } else {
+ LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
+ ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
+ size_t size = c.size();
+ size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
+ FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
+ PerformAllocationCallback(space, kAllocationActionFree, size);
+ }
+ c.init(NULL, 0, NULL);
+ Push(chunk_id);
+}
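
The branch in DeleteChunk above separates memory carved out of the initial reservation (which can only be uncommitted, keeping the reservation alive) from separately allocated chunks (which are freed outright). A toy model of that distinction, using hypothetical types rather than V8's VirtualMemory:

#include <cstddef>
#include <cstdlib>

struct ToyReservation {
  char* base;
  size_t size;
  // Uncommitting returns the pages to the OS but keeps the address range
  // reserved, so it must not be handed to free().
  void Uncommit(char* /*start*/, size_t /*length*/) { /* e.g. decommit pages */ }
};

struct ToyChunk {
  char* address;
  size_t size;
};

static void ToyDeleteChunk(ToyReservation* initial, ToyChunk* chunk) {
  bool in_initial = initial->base <= chunk->address &&
                    chunk->address < initial->base + initial->size;
  if (in_initial) {
    initial->Uncommit(chunk->address, chunk->size);  // keep the reservation
  } else {
    std::free(chunk->address);                       // separately allocated
  }
}
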
+
+
+Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
+ int chunk_id = GetChunkId(p);
+ ASSERT(IsValidChunk(chunk_id));
+
+ Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
+ return Page::FromAddress(low);
+}
+
+
+Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
+ int chunk_id = GetChunkId(p);
+ ASSERT(IsValidChunk(chunk_id));
+
+ Address chunk_start = chunks_[chunk_id].address();
+ size_t chunk_size = chunks_[chunk_id].size();
+
+ Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
+ ASSERT(chunk_start <= p->address() && p->address() < high);
+
+ return Page::FromAddress(high - Page::kPageSize);
}
@@ -659,6 +739,75 @@ void MemoryAllocator::ReportStatistics() {
}
#endif
+
+void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
+ Page** first_page,
+ Page** last_page,
+ Page** last_page_in_use) {
+ Page* first = NULL;
+ Page* last = NULL;
+
+ for (int i = 0, length = chunks_.length(); i < length; i++) {
+ ChunkInfo& chunk = chunks_[i];
+
+ if (chunk.owner() == space) {
+ if (first == NULL) {
+ Address low = RoundUp(chunk.address(), Page::kPageSize);
+ first = Page::FromAddress(low);
+ }
+ last = RelinkPagesInChunk(i,
+ chunk.address(),
+ chunk.size(),
+ last,
+ last_page_in_use);
+ }
+ }
+
+ if (first_page != NULL) {
+ *first_page = first;
+ }
+
+ if (last_page != NULL) {
+ *last_page = last;
+ }
+}
+
+
+Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
+ Address chunk_start,
+ size_t chunk_size,
+ Page* prev,
+ Page** last_page_in_use) {
+ Address page_addr = RoundUp(chunk_start, Page::kPageSize);
+ int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
+
+ if (prev->is_valid()) {
+ SetNextPage(prev, Page::FromAddress(page_addr));
+ }
+
+ for (int i = 0; i < pages_in_chunk; i++) {
+ Page* p = Page::FromAddress(page_addr);
+ p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+ page_addr += Page::kPageSize;
+
+ p->InvalidateWatermark(true);
+ if (p->WasInUseBeforeMC()) {
+ *last_page_in_use = p;
+ }
+ }
+
+ // Set the next page of the last page to 0.
+ Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
+ last_page->opaque_header = OffsetFrom(0) | chunk_id;
+
+ if (last_page->WasInUseBeforeMC()) {
+ *last_page_in_use = last_page;
+ }
+
+ return last_page;
+}
+
+
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -666,11 +815,7 @@ PagedSpace::PagedSpace(Heap* heap,
intptr_t max_capacity,
AllocationSpace id,
Executability executable)
- : Space(heap, id, executable),
- free_list_(this),
- was_swept_conservatively_(false),
- first_unswept_page_(Page::FromAddress(NULL)),
- last_unswept_page_(Page::FromAddress(NULL)) {
+ : Space(heap, id, executable) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
accounting_stats_.Clear();
@@ -678,73 +823,215 @@ PagedSpace::PagedSpace(Heap* heap,
allocation_info_.top = NULL;
allocation_info_.limit = NULL;
- anchor_.InitializeAsAnchor(this);
+ mc_forwarding_info_.top = NULL;
+ mc_forwarding_info_.limit = NULL;
}
-bool PagedSpace::Setup() {
+bool PagedSpace::Setup(Address start, size_t size) {
+ if (HasBeenSetup()) return false;
+
+ int num_pages = 0;
+ // Try to use the virtual memory range passed to us. If it is too small to
+ // contain at least one page, ignore it and allocate instead.
+ int pages_in_chunk = PagesInChunk(start, size);
+ if (pages_in_chunk > 0) {
+ first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
+ RoundUp(start, Page::kPageSize),
+ Page::kPageSize * pages_in_chunk,
+ this, &num_pages);
+ } else {
+ int requested_pages =
+ Min(MemoryAllocator::kPagesPerChunk,
+ static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
+ first_page_ =
+ Isolate::Current()->memory_allocator()->AllocatePages(
+ requested_pages, &num_pages, this);
+ if (!first_page_->is_valid()) return false;
+ }
+
+ // We are sure that the first page is valid and that we have at least one
+ // page.
+ ASSERT(first_page_->is_valid());
+ ASSERT(num_pages > 0);
+ accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
+ ASSERT(Capacity() <= max_capacity_);
+
+ // Sequentially clear region marks in the newly allocated
+ // pages and cache the current last page in the space.
+ for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
+ p->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ last_page_ = p;
+ }
+
+ // Use first_page_ for allocation.
+ SetAllocationInfo(&allocation_info_, first_page_);
+
+ page_list_is_chunk_ordered_ = true;
+
return true;
}
bool PagedSpace::HasBeenSetup() {
- return true;
+ return (Capacity() > 0);
}
void PagedSpace::TearDown() {
- PageIterator iterator(this);
- while (iterator.has_next()) {
- heap()->isolate()->memory_allocator()->Free(iterator.next());
- }
- anchor_.set_next_page(&anchor_);
- anchor_.set_prev_page(&anchor_);
+ Isolate::Current()->memory_allocator()->FreeAllPages(this);
+ first_page_ = NULL;
accounting_stats_.Clear();
}
+void PagedSpace::MarkAllPagesClean() {
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ while (it.has_next()) {
+ it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ }
+}
+
+
MaybeObject* PagedSpace::FindObject(Address addr) {
- // Note: this function can only be called on precisely swept spaces.
+ // Note: this function can only be called before or after mark-compact GC
+ // because it accesses map pointers.
ASSERT(!heap()->mark_compact_collector()->in_use());
if (!Contains(addr)) return Failure::Exception();
Page* p = Page::FromAddress(addr);
- HeapObjectIterator it(p, NULL);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- Address cur = obj->address();
+ ASSERT(IsUsed(p));
+ Address cur = p->ObjectAreaStart();
+ Address end = p->AllocationTop();
+ while (cur < end) {
+ HeapObject* obj = HeapObject::FromAddress(cur);
Address next = cur + obj->Size();
if ((cur <= addr) && (addr < next)) return obj;
+ cur = next;
}
UNREACHABLE();
return Failure::Exception();
}
-bool PagedSpace::CanExpand() {
- ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
- ASSERT(Capacity() % Page::kObjectAreaSize == 0);
- if (Capacity() == max_capacity_) return false;
+bool PagedSpace::IsUsed(Page* page) {
+ PageIterator it(this, PageIterator::PAGES_IN_USE);
+ while (it.has_next()) {
+ if (page == it.next()) return true;
+ }
+ return false;
+}
- ASSERT(Capacity() < max_capacity_);
- // Are we going to exceed capacity for this space?
- if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
+void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
+ alloc_info->top = p->ObjectAreaStart();
+ alloc_info->limit = p->ObjectAreaEnd();
+ ASSERT(alloc_info->VerifyPagedAllocation());
+}
- return true;
+
+void PagedSpace::MCResetRelocationInfo() {
+ // Set page indexes.
+ int i = 0;
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ while (it.has_next()) {
+ Page* p = it.next();
+ p->mc_page_index = i++;
+ }
+
+ // Set mc_forwarding_info_ to the first page in the space.
+ SetAllocationInfo(&mc_forwarding_info_, first_page_);
+ // All the bytes in the space are 'available'. We will rediscover
+ // allocated and wasted bytes during GC.
+ accounting_stats_.Reset();
}
-bool PagedSpace::Expand() {
- if (!CanExpand()) return false;
- Page* p = heap()->isolate()->memory_allocator()->
- AllocatePage(this, executable());
- if (p == NULL) return false;
+int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
+#ifdef DEBUG
+  // The Contains function considers an address at the beginning of a page
+  // to be in that page, while MCSpaceOffsetForAddress considers it to be in
+  // the previous page.
+ if (Page::IsAlignedToPageSize(addr)) {
+ ASSERT(Contains(addr - kPointerSize));
+ } else {
+ ASSERT(Contains(addr));
+ }
+#endif
+  // If addr is at the end of a page, it belongs to the previous page.
+ Page* p = Page::IsAlignedToPageSize(addr)
+ ? Page::FromAllocationTop(addr)
+ : Page::FromAddress(addr);
+ int index = p->mc_page_index;
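+  // The result is linear in the page index: an address N bytes past the
+  // start of the page with mc_page_index i maps to offset
+  // i * Page::kPageSize + N.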
+ return (index * Page::kPageSize) + p->Offset(addr);
+}
+
+
+// Slow case for reallocating and promoting objects during a compacting
+// collection. This function is not space-specific.
+HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
+ Page* current_page = TopPageOf(mc_forwarding_info_);
+ if (!current_page->next_page()->is_valid()) {
+ if (!Expand(current_page)) {
+ return NULL;
+ }
+ }
+
+ // There are surely more pages in the space now.
+ ASSERT(current_page->next_page()->is_valid());
+  // We do not add the top-of-page block for the current page to the space's
+  // free list---the block may contain live objects, so we cannot write
+  // bookkeeping information to it. Instead, we will recover top-of-page
+  // blocks when we move objects to their new locations.
+  //
+  // We do, however, write the allocation pointer to the page. Forwarding
+  // addresses are encoded as an offset in terms of live bytes, so we need
+  // quick access to the allocation top of each page to decode them.
+ current_page->SetAllocationWatermark(mc_forwarding_info_.top);
+ current_page->next_page()->InvalidateWatermark(true);
+ SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
+ return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
+}
+
+
+bool PagedSpace::Expand(Page* last_page) {
+ ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
+ ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+
+ if (Capacity() == max_capacity_) return false;
+
+ ASSERT(Capacity() < max_capacity_);
+ // Last page must be valid and its next page is invalid.
+ ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
+
+ int available_pages =
+ static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
+  // We don't want to have to handle small chunks near the end, so if there
+  // are not kPagesPerChunk pages available without exceeding the max
+  // capacity, act as if memory has run out.
+ if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
+
+ int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
+ Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
+ desired_pages, &desired_pages, this);
+ if (!p->is_valid()) return false;
+
+ accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
ASSERT(Capacity() <= max_capacity_);
- p->InsertAfter(anchor_.prev_page());
+ heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
+
+  // Sequentially clear region marks of new pages and cache the
+ // new last page in the space.
+ while (p->is_valid()) {
+ p->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ last_page_ = p;
+ p = p->next_page();
+ }
return true;
}
@@ -752,10 +1039,8 @@ bool PagedSpace::Expand() {
#ifdef DEBUG
int PagedSpace::CountTotalPages() {
- PageIterator it(this);
int count = 0;
- while (it.has_next()) {
- it.next();
+ for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
count++;
}
return count;
@@ -763,30 +1048,63 @@ int PagedSpace::CountTotalPages() {
#endif
-void PagedSpace::ReleasePage(Page* page) {
- ASSERT(page->LiveBytes() == 0);
- page->Unlink();
- if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
- heap()->isolate()->memory_allocator()->Free(page);
- } else {
- heap()->QueueMemoryChunkForFree(page);
+void PagedSpace::Shrink() {
+ if (!page_list_is_chunk_ordered_) {
+    // We can't shrink the space if the page list is not chunk-ordered
+ // (see comment for class MemoryAllocator for definition).
+ return;
}
- ASSERT(Capacity() > 0);
- ASSERT(Capacity() % Page::kObjectAreaSize == 0);
- accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
+ // Release half of free pages.
+ Page* top_page = AllocationTopPage();
+ ASSERT(top_page->is_valid());
+
+ // Count the number of pages we would like to free.
+ int pages_to_free = 0;
+ for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
+ pages_to_free++;
+ }
+
+ // Free pages after top_page.
+ Page* p = heap()->isolate()->memory_allocator()->
+ FreePages(top_page->next_page());
+ heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
+
+ // Find out how many pages we failed to free and update last_page_.
+  // Note that pages can only be freed in whole chunks.
+ last_page_ = top_page;
+ for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
+ pages_to_free--;
+ last_page_ = p;
+ }
+
+ accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
+ ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}
-void PagedSpace::ReleaseAllUnusedPages() {
- PageIterator it(this);
- while (it.has_next()) {
- Page* page = it.next();
- if (page->LiveBytes() == 0) {
- ReleasePage(page);
- }
+bool PagedSpace::EnsureCapacity(int capacity) {
+ if (Capacity() >= capacity) return true;
+
+ // Start from the allocation top and loop to the last page in the space.
+ Page* last_page = AllocationTopPage();
+ Page* next_page = last_page->next_page();
+ while (next_page->is_valid()) {
+ last_page = heap()->isolate()->memory_allocator()->
+ FindLastPageInSameChunk(next_page);
+ next_page = last_page->next_page();
}
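+  // The loop above hops to the last page of each successive chunk, so
+  // last_page now refers to the final page in the space's page list.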
- heap()->FreeQueuedChunks();
+
+ // Expand the space until it has the required capacity or expansion fails.
+ do {
+ if (!Expand(last_page)) return false;
+ ASSERT(last_page->next_page()->is_valid());
+ last_page =
+ heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
+ last_page->next_page());
+ } while (Capacity() < capacity);
+
+ return true;
}
@@ -796,52 +1114,61 @@ void PagedSpace::Print() { }
#ifdef DEBUG
+// We do not assume that the PageIterator works, because it depends on the
+// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
- // We can only iterate over the pages if they were swept precisely.
- if (was_swept_conservatively_) return;
-
- bool allocation_pointer_found_in_space =
- (allocation_info_.top == allocation_info_.limit);
- PageIterator page_iterator(this);
- while (page_iterator.has_next()) {
- Page* page = page_iterator.next();
- ASSERT(page->owner() == this);
- if (page == Page::FromAllocationTop(allocation_info_.top)) {
- allocation_pointer_found_in_space = true;
- }
- ASSERT(page->WasSweptPrecisely());
- HeapObjectIterator it(page, NULL);
- Address end_of_previous_object = page->ObjectAreaStart();
- Address top = page->ObjectAreaEnd();
- int black_size = 0;
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- ASSERT(end_of_previous_object <= object->address());
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map* map = object->map();
- ASSERT(map->IsMap());
- ASSERT(heap()->map_space()->Contains(map));
-
- // Perform space-specific object verification.
- VerifyObject(object);
-
- // The object itself should look OK.
- object->Verify();
-
- // All the interior pointers should be contained in the heap.
- int size = object->Size();
- object->IterateBody(map->instance_type(), size, visitor);
- if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
- black_size += size;
+ // The allocation pointer should be valid, and it should be in a page in the
+ // space.
+ ASSERT(allocation_info_.VerifyPagedAllocation());
+ Page* top_page = Page::FromAllocationTop(allocation_info_.top);
+ ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
+
+ // Loop over all the pages.
+ bool above_allocation_top = false;
+ Page* current_page = first_page_;
+ while (current_page->is_valid()) {
+ if (above_allocation_top) {
+ // We don't care what's above the allocation top.
+ } else {
+ Address top = current_page->AllocationTop();
+ if (current_page == top_page) {
+ ASSERT(top == allocation_info_.top);
+ // The next page will be above the allocation top.
+ above_allocation_top = true;
}
- ASSERT(object->address() + size <= top);
- end_of_previous_object = object->address() + size;
+ // It should be packed with objects from the bottom to the top.
+ Address current = current_page->ObjectAreaStart();
+ while (current < top) {
+ HeapObject* object = HeapObject::FromAddress(current);
+
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space.
+ Map* map = object->map();
+ ASSERT(map->IsMap());
+ ASSERT(heap()->map_space()->Contains(map));
+
+ // Perform space-specific object verification.
+ VerifyObject(object);
+
+ // The object itself should look OK.
+ object->Verify();
+
+        // All the interior pointers should be contained in the heap, and
+        // page regions covering intergenerational references should be
+        // marked dirty.
+ int size = object->Size();
+ object->IterateBody(map->instance_type(), size, visitor);
+
+ current += size;
+ }
+
+ // The allocation pointer should not be in the middle of an object.
+ ASSERT(current == top);
}
- ASSERT_LE(black_size, page->LiveBytes());
+
+ current_page = current_page->next_page();
}
- ASSERT(allocation_pointer_found_in_space);
}
#endif
@@ -850,23 +1177,13 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// NewSpace implementation
-bool NewSpace::Setup(int reserved_semispace_capacity,
- int maximum_semispace_capacity) {
+bool NewSpace::Setup(Address start, int size) {
// Setup new space based on the preallocated memory block defined by
// start and size. The provided space is divided into two semi-spaces.
// To support fast containment testing in the new space, the size of
// this chunk must be a power of two and it must be aligned to its size.
int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
-
- size_t size = 2 * reserved_semispace_capacity;
- Address base =
- heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
- size, size, &reservation_);
- if (base == NULL) return false;
-
- chunk_base_ = base;
- chunk_size_ = static_cast<uintptr_t>(size);
- LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
+ int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
@@ -880,29 +1197,31 @@ bool NewSpace::Setup(int reserved_semispace_capacity,
INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
- ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
- ASSERT(static_cast<intptr_t>(chunk_size_) >=
- 2 * heap()->ReservedSemiSpaceSize());
- ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
+ ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
+ ASSERT(IsAddressAligned(start, size, 0));
- if (!to_space_.Setup(chunk_base_,
+ if (!to_space_.Setup(start,
initial_semispace_capacity,
maximum_semispace_capacity)) {
return false;
}
- if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity,
+ if (!from_space_.Setup(start + maximum_semispace_capacity,
initial_semispace_capacity,
maximum_semispace_capacity)) {
return false;
}
- start_ = chunk_base_;
- address_mask_ = ~(2 * reserved_semispace_capacity - 1);
+ start_ = start;
+ address_mask_ = ~(size - 1);
object_mask_ = address_mask_ | kHeapObjectTagMask;
- object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
+ object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
- ResetAllocationInfo();
+ allocation_info_.top = to_space_.low();
+ allocation_info_.limit = to_space_.high();
+ mc_forwarding_info_.top = NULL;
+ mc_forwarding_info_.limit = NULL;
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
return true;
}
@@ -920,34 +1239,28 @@ void NewSpace::TearDown() {
start_ = NULL;
allocation_info_.top = NULL;
allocation_info_.limit = NULL;
+ mc_forwarding_info_.top = NULL;
+ mc_forwarding_info_.limit = NULL;
to_space_.TearDown();
from_space_.TearDown();
-
- LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
-
- ASSERT(reservation_.IsReserved());
- heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
- NOT_EXECUTABLE);
- chunk_base_ = NULL;
- chunk_size_ = 0;
}
void NewSpace::Flip() {
- SemiSpace::Swap(&from_space_, &to_space_);
+ SemiSpace tmp = from_space_;
+ from_space_ = to_space_;
+ to_space_ = tmp;
}
void NewSpace::Grow() {
- // Double the semispace size but only up to maximum capacity.
ASSERT(Capacity() < MaximumCapacity());
- int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
- if (to_space_.GrowTo(new_capacity)) {
- // Only grow from space if we managed to grow to-space.
- if (!from_space_.GrowTo(new_capacity)) {
- // If we managed to grow to-space but couldn't grow from-space,
- // attempt to shrink to-space.
+ if (to_space_.Grow()) {
+ // Only grow from space if we managed to grow to space.
+ if (!from_space_.Grow()) {
+ // If we managed to grow to space but couldn't grow from space,
+ // attempt to shrink to space.
if (!to_space_.ShrinkTo(from_space_.Capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
@@ -955,20 +1268,21 @@ void NewSpace::Grow() {
}
}
}
+ allocation_info_.limit = to_space_.high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::Shrink() {
int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
- int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
+ int rounded_new_capacity =
+ RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
if (rounded_new_capacity < Capacity() &&
to_space_.ShrinkTo(rounded_new_capacity)) {
- // Only shrink from-space if we managed to shrink to-space.
- from_space_.Reset();
+ // Only shrink from space if we managed to shrink to space.
if (!from_space_.ShrinkTo(rounded_new_capacity)) {
- // If we managed to shrink to-space but couldn't shrink from
- // space, attempt to grow to-space again.
+ // If we managed to shrink to space but couldn't shrink from
+ // space, attempt to grow to space again.
if (!to_space_.GrowTo(from_space_.Capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
@@ -976,65 +1290,36 @@ void NewSpace::Shrink() {
}
}
}
- allocation_info_.limit = to_space_.page_high();
+ allocation_info_.limit = to_space_.high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
-void NewSpace::UpdateAllocationInfo() {
- allocation_info_.top = to_space_.page_low();
- allocation_info_.limit = to_space_.page_high();
-
- // Lower limit during incremental marking.
- if (heap()->incremental_marking()->IsMarking() &&
- inline_allocation_limit_step() != 0) {
- Address new_limit =
- allocation_info_.top + inline_allocation_limit_step();
- allocation_info_.limit = Min(new_limit, allocation_info_.limit);
- }
+void NewSpace::ResetAllocationInfo() {
+ allocation_info_.top = to_space_.low();
+ allocation_info_.limit = to_space_.high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
-void NewSpace::ResetAllocationInfo() {
- to_space_.Reset();
- UpdateAllocationInfo();
- pages_used_ = 0;
- // Clear all mark-bits in the to-space.
- NewSpacePageIterator it(&to_space_);
- while (it.has_next()) {
- Bitmap::Clear(it.next());
- }
+void NewSpace::MCResetRelocationInfo() {
+ mc_forwarding_info_.top = from_space_.low();
+ mc_forwarding_info_.limit = from_space_.high();
+ ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}
-bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top;
- if (NewSpacePage::IsAtStart(top)) {
- // The current page is already empty. Don't try to make another.
-
- // We should only get here if someone asks to allocate more
- // than what can be stored in a single page.
- // TODO(gc): Change the limit on new-space allocation to prevent this
- // from happening (all such allocations should go directly to LOSpace).
- return false;
- }
- if (!to_space_.AdvancePage()) {
- // Failed to get a new page in to-space.
- return false;
- }
- // Clear remainder of current page.
- int remaining_in_page =
- static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top);
- heap()->CreateFillerObjectAt(top, remaining_in_page);
- pages_used_++;
- UpdateAllocationInfo();
- return true;
+void NewSpace::MCCommitRelocationInfo() {
+ // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
+ // valid allocation info for the to space.
+ allocation_info_.top = mc_forwarding_info_.top;
+ allocation_info_.limit = to_space_.high();
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
#ifdef DEBUG
-// We do not use the SemiSpaceIterator because verification doesn't assume
+// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
// The allocation pointer should be in the space or at the very end.
@@ -1042,52 +1327,58 @@ void NewSpace::Verify() {
// There should be objects packed in from the low address up to the
// allocation pointer.
- Address current = to_space_.first_page()->body();
- CHECK_EQ(current, to_space_.space_start());
+ Address current = to_space_.low();
+ while (current < top()) {
+ HeapObject* object = HeapObject::FromAddress(current);
- while (current != top()) {
- if (!NewSpacePage::IsAtEnd(current)) {
- // The allocation pointer should not be in the middle of an object.
- CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
- current < top());
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space.
+ Map* map = object->map();
+ ASSERT(map->IsMap());
+ ASSERT(heap()->map_space()->Contains(map));
- HeapObject* object = HeapObject::FromAddress(current);
+ // The object should not be code or a map.
+ ASSERT(!object->IsMap());
+ ASSERT(!object->IsCode());
+
+ // The object itself should look OK.
+ object->Verify();
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map* map = object->map();
- CHECK(map->IsMap());
- CHECK(heap()->map_space()->Contains(map));
+ // All the interior pointers should be contained in the heap.
+ VerifyPointersVisitor visitor;
+ int size = object->Size();
+ object->IterateBody(map->instance_type(), size, &visitor);
- // The object should not be code or a map.
- CHECK(!object->IsMap());
- CHECK(!object->IsCode());
+ current += size;
+ }
- // The object itself should look OK.
- object->Verify();
+ // The allocation pointer should not be in the middle of an object.
+ ASSERT(current == top());
+}
+#endif
- // All the interior pointers should be contained in the heap.
- VerifyPointersVisitor visitor;
- int size = object->Size();
- object->IterateBody(map->instance_type(), size, &visitor);
- current += size;
- } else {
- // At end of page, switch to next page.
- NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
- // Next page should be valid.
- CHECK(!page->is_anchor());
- current = page->body();
- }
+bool SemiSpace::Commit() {
+ ASSERT(!is_committed());
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
+ start_, capacity_, executable())) {
+ return false;
}
+ committed_ = true;
+ return true;
+}
- // Check semi-spaces.
- ASSERT_EQ(from_space_.id(), kFromSpace);
- ASSERT_EQ(to_space_.id(), kToSpace);
- from_space_.Verify();
- to_space_.Verify();
+
+bool SemiSpace::Uncommit() {
+ ASSERT(is_committed());
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+ start_, capacity_)) {
+ return false;
+ }
+ committed_ = false;
+ return true;
}
-#endif
+
// -----------------------------------------------------------------------------
// SemiSpace implementation
@@ -1101,11 +1392,11 @@ bool SemiSpace::Setup(Address start,
// otherwise. In the mark-compact collector, the memory region of the from
// space is used as the marking stack. It requires contiguous memory
// addresses.
- ASSERT(maximum_capacity >= Page::kPageSize);
- initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
+ initial_capacity_ = initial_capacity;
capacity_ = initial_capacity;
- maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+ maximum_capacity_ = maximum_capacity;
committed_ = false;
+
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
object_mask_ = address_mask_ | kHeapObjectTagMask;
@@ -1122,258 +1413,81 @@ void SemiSpace::TearDown() {
}
-bool SemiSpace::Commit() {
- ASSERT(!is_committed());
- int pages = capacity_ / Page::kPageSize;
- Address end = start_ + maximum_capacity_;
- Address start = end - pages * Page::kPageSize;
- if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
- capacity_,
- executable())) {
+bool SemiSpace::Grow() {
+ // Double the semispace size but only up to maximum capacity.
+ int maximum_extra = maximum_capacity_ - capacity_;
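+  // The extra amount committed just above high() is the current capacity
+  // rounded up to the OS allocation alignment, capped at maximum_extra so
+  // the semispace never grows beyond maximum_capacity_.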
+ int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
+ maximum_extra);
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
+ high(), extra, executable())) {
return false;
}
-
- NewSpacePage* page = anchor();
- for (int i = 1; i <= pages; i++) {
- NewSpacePage* new_page =
- NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
- new_page->InsertAfter(page);
- page = new_page;
- }
-
- committed_ = true;
- Reset();
- return true;
-}
-
-
-bool SemiSpace::Uncommit() {
- ASSERT(is_committed());
- Address start = start_ + maximum_capacity_ - capacity_;
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
- return false;
- }
- anchor()->set_next_page(anchor());
- anchor()->set_prev_page(anchor());
-
- committed_ = false;
+ capacity_ += extra;
return true;
}
bool SemiSpace::GrowTo(int new_capacity) {
- ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
ASSERT(new_capacity <= maximum_capacity_);
ASSERT(new_capacity > capacity_);
- int pages_before = capacity_ / Page::kPageSize;
- int pages_after = new_capacity / Page::kPageSize;
-
- Address end = start_ + maximum_capacity_;
- Address start = end - new_capacity;
size_t delta = new_capacity - capacity_;
-
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start, delta, executable())) {
+ high(), delta, executable())) {
return false;
}
capacity_ = new_capacity;
- NewSpacePage* last_page = anchor()->prev_page();
- ASSERT(last_page != anchor());
- for (int i = pages_before + 1; i <= pages_after; i++) {
- Address page_address = end - i * Page::kPageSize;
- NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
- page_address,
- this);
- new_page->InsertAfter(last_page);
- Bitmap::Clear(new_page);
- // Duplicate the flags that was set on the old page.
- new_page->SetFlags(last_page->GetFlags(),
- NewSpacePage::kCopyOnFlipFlagsMask);
- last_page = new_page;
- }
return true;
}
bool SemiSpace::ShrinkTo(int new_capacity) {
- ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
ASSERT(new_capacity >= initial_capacity_);
ASSERT(new_capacity < capacity_);
- // Semispaces grow backwards from the end of their allocated capacity,
- // so we find the before and after start addresses relative to the
- // end of the space.
- Address space_end = start_ + maximum_capacity_;
- Address old_start = space_end - capacity_;
size_t delta = capacity_ - new_capacity;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) {
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+ high() - delta, delta)) {
return false;
}
capacity_ = new_capacity;
-
- int pages_after = capacity_ / Page::kPageSize;
- NewSpacePage* new_last_page =
- NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
- new_last_page->set_next_page(anchor());
- anchor()->set_prev_page(new_last_page);
- ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
-
return true;
}
-void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
- anchor_.set_owner(this);
- // Fixup back-pointers to anchor. Address of anchor changes
- // when we swap.
- anchor_.prev_page()->set_next_page(&anchor_);
- anchor_.next_page()->set_prev_page(&anchor_);
-
- bool becomes_to_space = (id_ == kFromSpace);
- id_ = becomes_to_space ? kToSpace : kFromSpace;
- NewSpacePage* page = anchor_.next_page();
- while (page != &anchor_) {
- page->set_owner(this);
- page->SetFlags(flags, mask);
- if (becomes_to_space) {
- page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
- page->SetFlag(MemoryChunk::IN_TO_SPACE);
- page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
- page->ResetLiveBytes();
- } else {
- page->SetFlag(MemoryChunk::IN_FROM_SPACE);
- page->ClearFlag(MemoryChunk::IN_TO_SPACE);
- }
- ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
- ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
- page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
- page = page->next_page();
- }
-}
-
-
-void SemiSpace::Reset() {
- ASSERT(anchor_.next_page() != &anchor_);
- current_page_ = anchor_.next_page();
-}
-
-
-void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
- // We won't be swapping semispaces without data in them.
- ASSERT(from->anchor_.next_page() != &from->anchor_);
- ASSERT(to->anchor_.next_page() != &to->anchor_);
-
- // Swap bits.
- SemiSpace tmp = *from;
- *from = *to;
- *to = tmp;
-
- // Fixup back-pointers to the page list anchor now that its address
- // has changed.
- // Swap to/from-space bits on pages.
- // Copy GC flags from old active space (from-space) to new (to-space).
- intptr_t flags = from->current_page()->GetFlags();
- to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
-
- from->FlipPages(0, 0);
-}
-
-
-void SemiSpace::set_age_mark(Address mark) {
- ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
- age_mark_ = mark;
- // Mark all pages up to the one containing mark.
- NewSpacePageIterator it(space_start(), mark);
- while (it.has_next()) {
- it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
- }
-}
-
-
#ifdef DEBUG
void SemiSpace::Print() { }
-void SemiSpace::Verify() {
- bool is_from_space = (id_ == kFromSpace);
- NewSpacePage* page = anchor_.next_page();
- CHECK(anchor_.semi_space() == this);
- while (page != &anchor_) {
- CHECK(page->semi_space() == this);
- CHECK(page->InNewSpace());
- CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
- : MemoryChunk::IN_TO_SPACE));
- CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
- : MemoryChunk::IN_FROM_SPACE));
- CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
- if (!is_from_space) {
- // The pointers-from-here-are-interesting flag isn't updated dynamically
- // on from-space pages, so it might be out of sync with the marking state.
- if (page->heap()->incremental_marking()->IsMarking()) {
- CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
- } else {
- CHECK(!page->IsFlagSet(
- MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
- }
- // TODO(gc): Check that the live_bytes_count_ field matches the
- // black marking on the page (if we make it match in new-space).
- }
- CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
- CHECK(page->prev_page()->next_page() == page);
- page = page->next_page();
- }
-}
-
-
-void SemiSpace::AssertValidRange(Address start, Address end) {
- // Addresses belong to same semi-space
- NewSpacePage* page = NewSpacePage::FromLimit(start);
- NewSpacePage* end_page = NewSpacePage::FromLimit(end);
- SemiSpace* space = page->semi_space();
- CHECK_EQ(space, end_page->semi_space());
- // Start address is before end address, either on same page,
- // or end address is on a later page in the linked list of
- // semi-space pages.
- if (page == end_page) {
- CHECK(start <= end);
- } else {
- while (page != end_page) {
- page = page->next_page();
- CHECK_NE(page, space->anchor());
- }
- }
-}
+void SemiSpace::Verify() { }
#endif
// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
- Initialize(space->bottom(), space->top(), NULL);
+ Initialize(space, space->bottom(), space->top(), NULL);
}
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
HeapObjectCallback size_func) {
- Initialize(space->bottom(), space->top(), size_func);
+ Initialize(space, space->bottom(), space->top(), size_func);
}
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
- Initialize(start, space->top(), NULL);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
- Initialize(from, to, NULL);
+ Initialize(space, start, space->top(), NULL);
}
-void SemiSpaceIterator::Initialize(Address start,
+void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
Address end,
HeapObjectCallback size_func) {
- SemiSpace::AssertValidRange(start, end);
+ ASSERT(space->ToSpaceContains(start));
+ ASSERT(space->ToSpaceLow() <= end
+ && end <= space->ToSpaceHigh());
+ space_ = &space->to_space_;
current_ = start;
limit_ = end;
size_func_ = size_func;
@@ -1509,7 +1623,7 @@ void NewSpace::ClearHistograms() {
void NewSpace::CollectStatistics() {
ClearHistograms();
SemiSpaceIterator it(this);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
RecordAllocation(obj);
}
@@ -1585,6 +1699,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
promoted_histogram_[type].increment_bytes(obj->Size());
}
+
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
@@ -1593,17 +1708,17 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
ASSERT(IsAligned(size_in_bytes, kPointerSize));
// We write a map and possibly size information to the block. If the block
- // is big enough to be a FreeSpace with at least one extra word (the next
- // pointer), we set its map to be the free space map and its size to an
+ // is big enough to be a ByteArray with at least one extra word (the next
+ // pointer), we set its map to be the byte array map and its size to an
// appropriate array length for the desired size from HeapObject::Size().
   // If the block is too small (e.g., one or two words) to hold both a size
// field and a next pointer, we give it a filler map that gives it the
// correct size.
- if (size_in_bytes > FreeSpace::kHeaderSize) {
- set_map(heap->raw_unchecked_free_space_map());
- // Can't use FreeSpace::cast because it fails during deserialization.
- FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
- this_as_free_space->set_size(size_in_bytes);
+ if (size_in_bytes > ByteArray::kHeaderSize) {
+ set_map(heap->raw_unchecked_byte_array_map());
+ // Can't use ByteArray::cast because it fails during deserialization.
+ ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
+ this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
} else if (size_in_bytes == kPointerSize) {
set_map(heap->raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
@@ -1612,300 +1727,318 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
UNREACHABLE();
}
// We would like to ASSERT(Size() == size_in_bytes) but this would fail during
- // deserialization because the free space map is not done yet.
+ // deserialization because the byte array map is not done yet.
}
-FreeListNode* FreeListNode::next() {
+Address FreeListNode::next(Heap* heap) {
ASSERT(IsFreeListNode(this));
- if (map() == HEAP->raw_unchecked_free_space_map()) {
- ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
- return reinterpret_cast<FreeListNode*>(
- Memory::Address_at(address() + kNextOffset));
+ if (map() == heap->raw_unchecked_byte_array_map()) {
+ ASSERT(Size() >= kNextOffset + kPointerSize);
+ return Memory::Address_at(address() + kNextOffset);
} else {
- return reinterpret_cast<FreeListNode*>(
- Memory::Address_at(address() + kPointerSize));
+ return Memory::Address_at(address() + kPointerSize);
}
}
-FreeListNode** FreeListNode::next_address() {
+void FreeListNode::set_next(Heap* heap, Address next) {
ASSERT(IsFreeListNode(this));
- if (map() == HEAP->raw_unchecked_free_space_map()) {
+ if (map() == heap->raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
- return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
+ Memory::Address_at(address() + kNextOffset) = next;
} else {
- return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
+ Memory::Address_at(address() + kPointerSize) = next;
}
}
-void FreeListNode::set_next(FreeListNode* next) {
- ASSERT(IsFreeListNode(this));
- // While we are booting the VM the free space map will actually be null. So
- // we have to make sure that we don't try to use it for anything at that
- // stage.
- if (map() == HEAP->raw_unchecked_free_space_map()) {
- ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
- Memory::Address_at(address() + kNextOffset) =
- reinterpret_cast<Address>(next);
- } else {
- Memory::Address_at(address() + kPointerSize) =
- reinterpret_cast<Address>(next);
- }
+OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
+ : heap_(heap),
+ owner_(owner) {
+ Reset();
}
-FreeList::FreeList(PagedSpace* owner)
- : owner_(owner), heap_(owner->heap()) {
- Reset();
+void OldSpaceFreeList::Reset() {
+ available_ = 0;
+ for (int i = 0; i < kFreeListsLength; i++) {
+ free_[i].head_node_ = NULL;
+ }
+ needs_rebuild_ = false;
+ finger_ = kHead;
+ free_[kHead].next_size_ = kEnd;
}
-void FreeList::Reset() {
- available_ = 0;
- small_list_ = NULL;
- medium_list_ = NULL;
- large_list_ = NULL;
- huge_list_ = NULL;
+void OldSpaceFreeList::RebuildSizeList() {
+ ASSERT(needs_rebuild_);
+ int cur = kHead;
+ for (int i = cur + 1; i < kFreeListsLength; i++) {
+ if (free_[i].head_node_ != NULL) {
+ free_[cur].next_size_ = i;
+ cur = i;
+ }
+ }
+ free_[cur].next_size_ = kEnd;
+ needs_rebuild_ = false;
}
-int FreeList::Free(Address start, int size_in_bytes) {
- if (size_in_bytes == 0) return 0;
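+// Returns the number of bytes from the block that were not added to the
+// free list (callers account for these as wasted bytes); returns 0 when the
+// whole block was queued on a free list.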
+int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
+#ifdef DEBUG
+ Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
+#endif
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(heap_, size_in_bytes);
- // Early return to drop too-small blocks on the floor.
- if (size_in_bytes < kSmallListMin) return size_in_bytes;
-
- // Insert other blocks at the head of a free list of the appropriate
- // magnitude.
- if (size_in_bytes <= kSmallListMax) {
- node->set_next(small_list_);
- small_list_ = node;
- } else if (size_in_bytes <= kMediumListMax) {
- node->set_next(medium_list_);
- medium_list_ = node;
- } else if (size_in_bytes <= kLargeListMax) {
- node->set_next(large_list_);
- large_list_ = node;
- } else {
- node->set_next(huge_list_);
- huge_list_ = node;
+ // We don't use the freelists in compacting mode. This makes it more like a
+ // GC that only has mark-sweep-compact and doesn't have a mark-sweep
+ // collector.
+ if (FLAG_always_compact) {
+ return size_in_bytes;
}
+
+ // Early return to drop too-small blocks on the floor (one or two word
+ // blocks cannot hold a map pointer, a size field, and a pointer to the
+ // next block in the free list).
+ if (size_in_bytes < kMinBlockSize) {
+ return size_in_bytes;
+ }
+
+ // Insert other blocks at the head of an exact free list.
+ int index = size_in_bytes >> kPointerSizeLog2;
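+  // Bucket i of free_[] holds blocks of exactly i * kPointerSize bytes, so
+  // shifting the byte size by kPointerSizeLog2 selects the exact-fit list.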
+ node->set_next(heap_, free_[index].head_node_);
+ free_[index].head_node_ = node->address();
available_ += size_in_bytes;
- ASSERT(IsVeryLong() || available_ == SumFreeLists());
+ needs_rebuild_ = true;
return 0;
}
-FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
- FreeListNode* node = *list;
-
- if (node == NULL) return NULL;
-
- while (node != NULL &&
- Page::FromAddress(node->address())->IsEvacuationCandidate()) {
- available_ -= node->Size();
- node = node->next();
- }
+MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
+ ASSERT(0 < size_in_bytes);
+ ASSERT(size_in_bytes <= kMaxBlockSize);
+ ASSERT(IsAligned(size_in_bytes, kPointerSize));
- if (node != NULL) {
- *node_size = node->Size();
- *list = node->next();
+ if (needs_rebuild_) RebuildSizeList();
+ int index = size_in_bytes >> kPointerSizeLog2;
+ // Check for a perfect fit.
+ if (free_[index].head_node_ != NULL) {
+ FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
+ // If this was the last block of its size, remove the size.
+ if ((free_[index].head_node_ = node->next(heap_)) == NULL)
+ RemoveSize(index);
+ available_ -= size_in_bytes;
+ *wasted_bytes = 0;
+ ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
+ return node;
+ }
+ // Search the size list for the best fit.
+ int prev = finger_ < index ? finger_ : kHead;
+ int cur = FindSize(index, &prev);
+ ASSERT(index < cur);
+ if (cur == kEnd) {
+ // No large enough size in list.
+ *wasted_bytes = 0;
+ return Failure::RetryAfterGC(owner_);
+ }
+ ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
+ int rem = cur - index;
+ int rem_bytes = rem << kPointerSizeLog2;
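+  // 'rem' is the size class (in words) of the remainder left after carving
+  // size_in_bytes out of the best-fitting block of size class 'cur';
+  // 'rem_bytes' is the same remainder in bytes.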
+ FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
+ ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
+ FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
+ size_in_bytes);
+ // Distinguish the cases prev < rem < cur and rem <= prev < cur
+ // to avoid many redundant tests and calls to Insert/RemoveSize.
+ if (prev < rem) {
+ // Simple case: insert rem between prev and cur.
+ finger_ = prev;
+ free_[prev].next_size_ = rem;
+ // If this was the last block of size cur, remove the size.
+ if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
+ free_[rem].next_size_ = free_[cur].next_size_;
+ } else {
+ free_[rem].next_size_ = cur;
+ }
+ // Add the remainder block.
+ rem_node->set_size(heap_, rem_bytes);
+ rem_node->set_next(heap_, free_[rem].head_node_);
+ free_[rem].head_node_ = rem_node->address();
} else {
- *list = NULL;
+ // If this was the last block of size cur, remove the size.
+ if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
+ finger_ = prev;
+ free_[prev].next_size_ = free_[cur].next_size_;
+ }
+ if (rem_bytes < kMinBlockSize) {
+ // Too-small remainder is wasted.
+ rem_node->set_size(heap_, rem_bytes);
+ available_ -= size_in_bytes + rem_bytes;
+ *wasted_bytes = rem_bytes;
+ return cur_node;
+ }
+ // Add the remainder block and, if needed, insert its size.
+ rem_node->set_size(heap_, rem_bytes);
+ rem_node->set_next(heap_, free_[rem].head_node_);
+ free_[rem].head_node_ = rem_node->address();
+ if (rem_node->next(heap_) == NULL) InsertSize(rem);
}
-
- return node;
+ available_ -= size_in_bytes;
+ *wasted_bytes = 0;
+ return cur_node;
}
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
- FreeListNode* node = NULL;
-
- if (size_in_bytes <= kSmallAllocationMax) {
- node = PickNodeFromList(&small_list_, node_size);
- if (node != NULL) return node;
+void OldSpaceFreeList::MarkNodes() {
+ for (int i = 0; i < kFreeListsLength; i++) {
+ Address cur_addr = free_[i].head_node_;
+ while (cur_addr != NULL) {
+ FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
+ cur_addr = cur_node->next(heap_);
+ cur_node->SetMark();
+ }
}
+}
- if (size_in_bytes <= kMediumAllocationMax) {
- node = PickNodeFromList(&medium_list_, node_size);
- if (node != NULL) return node;
- }
- if (size_in_bytes <= kLargeAllocationMax) {
- node = PickNodeFromList(&large_list_, node_size);
- if (node != NULL) return node;
+#ifdef DEBUG
+bool OldSpaceFreeList::Contains(FreeListNode* node) {
+ for (int i = 0; i < kFreeListsLength; i++) {
+ Address cur_addr = free_[i].head_node_;
+ while (cur_addr != NULL) {
+ FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
+ if (cur_node == node) return true;
+ cur_addr = cur_node->next(heap_);
+ }
}
+ return false;
+}
+#endif
- for (FreeListNode** cur = &huge_list_;
- *cur != NULL;
- cur = (*cur)->next_address()) {
- FreeListNode* cur_node = *cur;
- while (cur_node != NULL &&
- Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
- available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
- cur_node = cur_node->next();
- }
- *cur = cur_node;
- if (cur_node == NULL) break;
-
- ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
- int size = cur_as_free_space->Size();
- if (size >= size_in_bytes) {
- // Large enough node found. Unlink it from the list.
- node = *cur;
- *node_size = size;
- *cur = node->next();
- break;
- }
- }
+FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
+ AllocationSpace owner,
+ int object_size)
+ : heap_(heap), owner_(owner), object_size_(object_size) {
+ Reset();
+}
- return node;
+
+void FixedSizeFreeList::Reset() {
+ available_ = 0;
+ head_ = tail_ = NULL;
}
-// Allocation on the old space free list. If it succeeds then a new linear
-// allocation space has been set up with the top and limit of the space. If
-// the allocation fails then NULL is returned, and the caller can perform a GC
-// or allocate a new page before retrying.
-HeapObject* FreeList::Allocate(int size_in_bytes) {
- ASSERT(0 < size_in_bytes);
- ASSERT(size_in_bytes <= kMaxBlockSize);
- ASSERT(IsAligned(size_in_bytes, kPointerSize));
- // Don't free list allocate if there is linear space available.
- ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
-
- int new_node_size = 0;
- FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
- if (new_node == NULL) return NULL;
-
- available_ -= new_node_size;
- ASSERT(IsVeryLong() || available_ == SumFreeLists());
-
- int bytes_left = new_node_size - size_in_bytes;
- ASSERT(bytes_left >= 0);
-
- int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap. This also puts it back in the free list
- // if it is big enough.
- owner_->Free(owner_->top(), old_linear_size);
- owner_->heap()->incremental_marking()->OldSpaceStep(
- size_in_bytes - old_linear_size);
-
- // The old-space-step might have finished sweeping and restarted marking.
- // Verify that it did not turn the page of the new node into an evacuation
- // candidate.
- ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
- const int kThreshold = IncrementalMarking::kAllocatedThreshold;
-
- // Memory in the linear allocation area is counted as allocated. We may free
- // a little of this again immediately - see below.
- owner_->Allocate(new_node_size);
-
- if (bytes_left > kThreshold &&
- owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking_steps) {
- int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
- // We don't want to give too large linear areas to the allocator while
- // incremental marking is going on, because we won't check again whether
- // we want to do another increment until the linear area is used up.
- owner_->Free(new_node->address() + size_in_bytes + linear_size,
- new_node_size - size_in_bytes - linear_size);
- owner_->SetTop(new_node->address() + size_in_bytes,
- new_node->address() + size_in_bytes + linear_size);
- } else if (bytes_left > 0) {
- // Normally we give the rest of the node to the allocator as its new
- // linear allocation area.
- owner_->SetTop(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
+void FixedSizeFreeList::Free(Address start) {
+#ifdef DEBUG
+ Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
+#endif
+ // We only use the freelists with mark-sweep.
+ ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
+ FreeListNode* node = FreeListNode::FromAddress(start);
+ node->set_size(heap_, object_size_);
+ node->set_next(heap_, NULL);
+ if (head_ == NULL) {
+ tail_ = head_ = node->address();
} else {
- // TODO(gc) Try not freeing linear allocation region when bytes_left
- // are zero.
- owner_->SetTop(NULL, NULL);
+ FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
+ tail_ = node->address();
}
-
- return new_node;
+ available_ += object_size_;
}
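+// The fixed-size free list is a simple FIFO queue: Free() appends blocks at
+// tail_ and Allocate() pops them from head_.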
-static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
- intptr_t sum = 0;
- while (n != NULL) {
- if (Page::FromAddress(n->address()) == p) {
- FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
- sum += free_space->Size();
- }
- n = n->next();
+MaybeObject* FixedSizeFreeList::Allocate() {
+ if (head_ == NULL) {
+ return Failure::RetryAfterGC(owner_);
}
- return sum;
-}
-
-void FreeList::CountFreeListItems(Page* p, intptr_t* sizes) {
- sizes[0] = CountFreeListItemsInList(small_list_, p);
- sizes[1] = CountFreeListItemsInList(medium_list_, p);
- sizes[2] = CountFreeListItemsInList(large_list_, p);
- sizes[3] = CountFreeListItemsInList(huge_list_, p);
+ ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
+ FreeListNode* node = FreeListNode::FromAddress(head_);
+ head_ = node->next(heap_);
+ available_ -= object_size_;
+ return node;
}
-#ifdef DEBUG
-intptr_t FreeList::SumFreeList(FreeListNode* cur) {
- intptr_t sum = 0;
- while (cur != NULL) {
- ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
- sum += cur_as_free_space->Size();
- cur = cur->next();
+
+void FixedSizeFreeList::MarkNodes() {
+ Address cur_addr = head_;
+ while (cur_addr != NULL && cur_addr != tail_) {
+ FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
+ cur_addr = cur_node->next(heap_);
+ cur_node->SetMark();
}
- return sum;
}
-static const int kVeryLongFreeList = 500;
-
+// -----------------------------------------------------------------------------
+// OldSpace implementation
-int FreeList::FreeListLength(FreeListNode* cur) {
- int length = 0;
- while (cur != NULL) {
- length++;
- cur = cur->next();
- if (length == kVeryLongFreeList) return length;
+void OldSpace::PrepareForMarkCompact(bool will_compact) {
+ // Call prepare of the super class.
+ PagedSpace::PrepareForMarkCompact(will_compact);
+
+ if (will_compact) {
+ // Reset relocation info. During a compacting collection, everything in
+ // the space is considered 'available' and we will rediscover live data
+ // and waste during the collection.
+ MCResetRelocationInfo();
+ ASSERT(Available() == Capacity());
+ } else {
+ // During a non-compacting collection, everything below the linear
+ // allocation pointer is considered allocated (everything above is
+ // available) and we will rediscover available and wasted bytes during
+ // the collection.
+ accounting_stats_.AllocateBytes(free_list_.available());
+ accounting_stats_.FillWastedBytes(Waste());
}
- return length;
+
+ // Clear the free list before a full GC---it will be rebuilt afterward.
+ free_list_.Reset();
}
-bool FreeList::IsVeryLong() {
- if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
- if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
- if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
- if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
- return false;
-}
+void OldSpace::MCCommitRelocationInfo() {
+ // Update fast allocation info.
+ allocation_info_.top = mc_forwarding_info_.top;
+ allocation_info_.limit = mc_forwarding_info_.limit;
+ ASSERT(allocation_info_.VerifyPagedAllocation());
+ // The space is compacted and we haven't yet built free lists or
+ // wasted any space.
+ ASSERT(Waste() == 0);
+ ASSERT(AvailableFree() == 0);
-// This can take a very long time because it is linear in the number of entries
-// on the free list, so it should not be called if FreeListLength returns
-// kVeryLongFreeList.
-intptr_t FreeList::SumFreeLists() {
- intptr_t sum = SumFreeList(small_list_);
- sum += SumFreeList(medium_list_);
- sum += SumFreeList(large_list_);
- sum += SumFreeList(huge_list_);
- return sum;
-}
-#endif
+ // Build the free list for the space.
+ int computed_size = 0;
+ PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
+ while (it.has_next()) {
+ Page* p = it.next();
+ // Space below the relocation pointer is allocated.
+ computed_size +=
+ static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
+ if (it.has_next()) {
+ // Free the space at the top of the page.
+ int extra_size =
+ static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
+ if (extra_size > 0) {
+ int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
+ extra_size);
+ // The bytes we have just "freed" to add to the free list were
+ // already accounted as available.
+ accounting_stats_.WasteBytes(wasted_bytes);
+ }
+ }
+ }
+  // Make sure the computed size, based on the used portion of the pages in
+  // use, matches the size obtained while computing forwarding addresses.
+ ASSERT(computed_size == Size());
+}
-// -----------------------------------------------------------------------------
-// OldSpace implementation
bool NewSpace::ReserveSpace(int bytes) {
// We can't reliably unpack a partial snapshot that needs more new space
@@ -1917,119 +2050,200 @@ bool NewSpace::ReserveSpace(int bytes) {
}
-void PagedSpace::PrepareForMarkCompact() {
- // We don't have a linear allocation area while sweeping. It will be restored
- // on the first allocation after the sweep.
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap.
- int old_linear_size = static_cast<int>(limit() - top());
- Free(top(), old_linear_size);
- SetTop(NULL, NULL);
-
- // Stop lazy sweeping and clear marking bits for unswept pages.
- if (first_unswept_page_ != NULL) {
- Page* last = last_unswept_page_;
- Page* p = first_unswept_page_;
- do {
- // Do not use ShouldBeSweptLazily predicate here.
- // New evacuation candidates were selected but they still have
- // to be swept before collection starts.
- if (!p->WasSwept()) {
- Bitmap::Clear(p);
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
- reinterpret_cast<intptr_t>(p));
- }
- }
- p = p->next_page();
- } while (p != last);
+void PagedSpace::FreePages(Page* prev, Page* last) {
+ if (last == AllocationTopPage()) {
+ // Pages are already at the end of used pages.
+ return;
}
- first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
-}
+ Page* first = NULL;
+
+ // Remove pages from the list.
+ if (prev == NULL) {
+ first = first_page_;
+ first_page_ = last->next_page();
+ } else {
+ first = prev->next_page();
+ heap()->isolate()->memory_allocator()->SetNextPage(
+ prev, last->next_page());
+ }
+  // Attach the removed pages after the current last page of the space.
+ heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
+ last_page_ = last;
+ heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
-bool PagedSpace::ReserveSpace(int size_in_bytes) {
- ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
- ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
- Address current_top = allocation_info_.top;
- Address new_top = current_top + size_in_bytes;
- if (new_top <= allocation_info_.limit) return true;
+ // Clean them up.
+ do {
+ first->InvalidateWatermark(true);
+ first->SetAllocationWatermark(first->ObjectAreaStart());
+ first->SetCachedAllocationWatermark(first->ObjectAreaStart());
+ first->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ first = first->next_page();
+ } while (first != NULL);
+
+ // Order of pages in this space might no longer be consistent with
+ // order of pages in chunks.
+ page_list_is_chunk_ordered_ = false;
+}
+
+
+void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
+ const bool add_to_freelist = true;
+
+ // Mark used and unused pages to properly fill unused pages
+ // after reordering.
+ PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
+ Page* last_in_use = AllocationTopPage();
+ bool in_use = true;
+
+ while (all_pages_iterator.has_next()) {
+ Page* p = all_pages_iterator.next();
+ p->SetWasInUseBeforeMC(in_use);
+ if (p == last_in_use) {
+      // We passed the page containing the allocation top. All subsequent
+ // pages are not used.
+ in_use = false;
+ }
+ }
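+  // Nothing more needs to happen if the page list is already in chunk
+  // order; only the WasInUseBeforeMC flags set above needed refreshing.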
- HeapObject* new_area = free_list_.Allocate(size_in_bytes);
- if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
- if (new_area == NULL) return false;
+ if (page_list_is_chunk_ordered_) return;
- int old_linear_size = static_cast<int>(limit() - top());
- // Mark the old linear allocation area with a free space so it can be
- // skipped when scanning the heap. This also puts it back in the free list
- // if it is big enough.
- Free(top(), old_linear_size);
+ Page* new_last_in_use = Page::FromAddress(NULL);
+ heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
+ this, &first_page_, &last_page_, &new_last_in_use);
+ ASSERT(new_last_in_use->is_valid());
- SetTop(new_area->address(), new_area->address() + size_in_bytes);
- Allocate(size_in_bytes);
- return true;
-}
+ if (new_last_in_use != last_in_use) {
+ // Current allocation top points to a page which is now in the middle
+ // of page list. We should move allocation top forward to the new last
+ // used page so various object iterators will continue to work properly.
+ int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
+ last_in_use->AllocationTop());
+ last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
+ if (size_in_bytes > 0) {
+ Address start = last_in_use->AllocationTop();
+ if (deallocate_blocks) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ DeallocateBlock(start, size_in_bytes, add_to_freelist);
+ } else {
+ heap()->CreateFillerObjectAt(start, size_in_bytes);
+ }
+ }
-// You have to call this last, since the implementation from PagedSpace
-// doesn't know that memory was 'promised' to large object space.
-bool LargeObjectSpace::ReserveSpace(int bytes) {
- return heap()->OldGenerationSpaceAvailable() >= bytes;
-}
+ // New last in use page was in the middle of the list before
+ // sorting, so it is full.
+ SetTop(new_last_in_use->AllocationTop());
+ ASSERT(AllocationTopPage() == new_last_in_use);
+ ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+ }
-bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
- if (IsSweepingComplete()) return true;
+ PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+ while (pages_in_use_iterator.has_next()) {
+ Page* p = pages_in_use_iterator.next();
+ if (!p->WasInUseBeforeMC()) {
+ // Empty page is in the middle of a sequence of used pages.
+ // Allocate it as a whole and deallocate immediately.
+ int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
+ p->ObjectAreaStart());
- intptr_t freed_bytes = 0;
- Page* last = last_unswept_page_;
- Page* p = first_unswept_page_;
- do {
- Page* next_page = p->next_page();
- if (ShouldBeSweptLazily(p)) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
- reinterpret_cast<intptr_t>(p));
+ p->SetAllocationWatermark(p->ObjectAreaStart());
+ Address start = p->ObjectAreaStart();
+ if (deallocate_blocks) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ DeallocateBlock(start, size_in_bytes, add_to_freelist);
+ } else {
+ heap()->CreateFillerObjectAt(start, size_in_bytes);
}
- freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
}
- p = next_page;
- } while (p != last && freed_bytes < bytes_to_sweep);
-
- if (p == last) {
- last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL);
- } else {
- first_unswept_page_ = p;
}
- heap()->LowerOldGenLimits(freed_bytes);
+ page_list_is_chunk_ordered_ = true;
+}
- heap()->FreeQueuedChunks();
- return IsSweepingComplete();
+void PagedSpace::PrepareForMarkCompact(bool will_compact) {
+ if (will_compact) {
+ RelinkPageListInChunkOrder(false);
+ }
}
-void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
- if (allocation_info_.top >= allocation_info_.limit) return;
+bool PagedSpace::ReserveSpace(int bytes) {
+ Address limit = allocation_info_.limit;
+ Address top = allocation_info_.top;
+ if (limit - top >= bytes) return true;
+
+ // There wasn't enough space in the current page. Let's put the rest
+ // of the page on the free list and start a fresh page.
+ PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
+
+ Page* reserved_page = TopPageOf(allocation_info_);
+ int bytes_left_to_reserve = bytes;
+ while (bytes_left_to_reserve > 0) {
+ if (!reserved_page->next_page()->is_valid()) {
+ if (heap()->OldGenerationAllocationLimitReached()) return false;
+ Expand(reserved_page);
+ }
+ bytes_left_to_reserve -= Page::kPageSize;
+ reserved_page = reserved_page->next_page();
+ if (!reserved_page->is_valid()) return false;
+ }
+ ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
+ TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
+ SetAllocationInfo(&allocation_info_,
+ TopPageOf(allocation_info_)->next_page());
+ return true;
+}
- if (Page::FromAddress(allocation_info_.top)->IsEvacuationCandidate()) {
- // Create filler object to keep page iterable if it was iterable.
- int remaining =
- static_cast<int>(allocation_info_.limit - allocation_info_.top);
- heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
- }
+// You have to call this last, since the implementation from PagedSpace
+// doesn't know that memory was 'promised' to large object space.
+bool LargeObjectSpace::ReserveSpace(int bytes) {
+ return heap()->OldGenerationSpaceAvailable() >= bytes;
}
-HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
- // Allocation in this space has failed.
+// Slow case for normal allocation. Try in order: (1) allocate in the next
+// page in the space, (2) allocate off the space's free list, (3) expand the
+// space, (4) fail.
+HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
+ // Linear allocation in this space has failed. If there is another page
+ // in the space, move to that page and allocate there. This allocation
+ // should succeed (size_in_bytes should not be greater than a page's
+ // object area size).
+ Page* current_page = TopPageOf(allocation_info_);
+ if (current_page->next_page()->is_valid()) {
+ return AllocateInNextPage(current_page, size_in_bytes);
+ }
+
+ // There is no next page in this space. Try free list allocation unless that
+ // is currently forbidden.
+ if (!heap()->linear_allocation()) {
+ int wasted_bytes;
+ Object* result;
+ MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
+ accounting_stats_.WasteBytes(wasted_bytes);
+ if (maybe->ToObject(&result)) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+
+ HeapObject* obj = HeapObject::cast(result);
+ Page* p = Page::FromAddress(obj->address());
+
+ if (obj->address() >= p->AllocationWatermark()) {
+ // There should be no hole between the allocation watermark
+ // and allocated object address.
+ // Memory above the allocation watermark was not swept and
+ // might contain garbage pointers to new space.
+ ASSERT(obj->address() == p->AllocationWatermark());
+ p->SetAllocationWatermark(obj->address() + size_in_bytes);
+ }
+
+ return obj;
+ }
+ }
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
@@ -2039,30 +2253,61 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
return NULL;
}
- // If there are unswept pages advance lazy sweeper.
- if (first_unswept_page_->is_valid()) {
- AdvanceSweeper(size_in_bytes);
+ // Try to expand the space and allocate in the new next page.
+ ASSERT(!current_page->next_page()->is_valid());
+ if (Expand(current_page)) {
+ return AllocateInNextPage(current_page, size_in_bytes);
+ }
- // Retry the free list allocation.
- HeapObject* object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) return object;
+ // Finally, fail.
+ return NULL;
+}
- if (!IsSweepingComplete()) {
- AdvanceSweeper(kMaxInt);
- // Retry the free list allocation.
- object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) return object;
- }
+void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+ current_page->SetAllocationWatermark(allocation_info_.top);
+ int free_size =
+ static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
+ if (free_size > 0) {
+ int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
+ accounting_stats_.WasteBytes(wasted_bytes);
}
+}
- // Try to expand the space and allocate in the new next page.
- if (Expand()) {
- return free_list_.Allocate(size_in_bytes);
+
+void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+ current_page->SetAllocationWatermark(allocation_info_.top);
+ int free_size =
+ static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
+ // In the fixed space free list all the free list items have the right size.
+ // We use up the rest of the page while preserving this invariant.
+ while (free_size >= object_size_in_bytes_) {
+ free_list_.Free(allocation_info_.top);
+ allocation_info_.top += object_size_in_bytes_;
+ free_size -= object_size_in_bytes_;
+ accounting_stats_.WasteBytes(object_size_in_bytes_);
}
+}
- // Finally, fail.
- return NULL;
+
+// Add the block at the top of the page to the space's free list, set the
+// allocation info to the next page (which is assumed to exist), and allocate
+// linearly there.
+HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
+ int size_in_bytes) {
+ ASSERT(current_page->next_page()->is_valid());
+ Page* next_page = current_page->next_page();
+ next_page->ClearGCFields();
+ PutRestOfCurrentPageOnFreeList(current_page);
+ SetAllocationInfo(&allocation_info_, next_page);
+ return AllocateLinearly(&allocation_info_, size_in_bytes);
+}
+
+
+void OldSpace::DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) {
+ Free(start, size_in_bytes, add_to_freelist);
}
@@ -2168,7 +2413,7 @@ static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
void PagedSpace::CollectCodeStatistics() {
Isolate* isolate = heap()->isolate();
HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
isolate->code_kind_statistics()[code->kind()] += code->Size();
@@ -2193,17 +2438,16 @@ void PagedSpace::CollectCodeStatistics() {
}
-void PagedSpace::ReportStatistics() {
+void OldSpace::ReportStatistics() {
int pct = static_cast<int>(Available() * 100 / Capacity());
PrintF(" capacity: %" V8_PTR_PREFIX "d"
", waste: %" V8_PTR_PREFIX "d"
", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
- if (was_swept_conservatively_) return;
ClearHistograms();
HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
CollectHistogramInfo(obj);
ReportHistogram(true);
}
@@ -2212,28 +2456,192 @@ void PagedSpace::ReportStatistics() {
// -----------------------------------------------------------------------------
// FixedSpace implementation
-void FixedSpace::PrepareForMarkCompact() {
+void FixedSpace::PrepareForMarkCompact(bool will_compact) {
// Call prepare of the super class.
- PagedSpace::PrepareForMarkCompact();
+ PagedSpace::PrepareForMarkCompact(will_compact);
- // During a non-compacting collection, everything below the linear
- // allocation pointer except wasted top-of-page blocks is considered
- // allocated and we will rediscover available bytes during the
- // collection.
- accounting_stats_.AllocateBytes(free_list_.available());
+ if (will_compact) {
+ // Reset relocation info.
+ MCResetRelocationInfo();
+
+ // During a compacting collection, everything in the space is considered
+ // 'available' (set by the call to MCResetRelocationInfo) and we will
+ // rediscover live and wasted bytes during the collection.
+ ASSERT(Available() == Capacity());
+ } else {
+ // During a non-compacting collection, everything below the linear
+ // allocation pointer except wasted top-of-page blocks is considered
+ // allocated and we will rediscover available bytes during the
+ // collection.
+ accounting_stats_.AllocateBytes(free_list_.available());
+ }
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
}
+void FixedSpace::MCCommitRelocationInfo() {
+ // Update fast allocation info.
+ allocation_info_.top = mc_forwarding_info_.top;
+ allocation_info_.limit = mc_forwarding_info_.limit;
+ ASSERT(allocation_info_.VerifyPagedAllocation());
+
+ // The space is compacted and we haven't yet wasted any space.
+ ASSERT(Waste() == 0);
+
+ // Update allocation_top of each page in use and compute waste.
+ int computed_size = 0;
+ PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
+ while (it.has_next()) {
+ Page* page = it.next();
+ Address page_top = page->AllocationTop();
+ computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
+ if (it.has_next()) {
+ accounting_stats_.WasteBytes(
+ static_cast<int>(page->ObjectAreaEnd() - page_top));
+ page->SetAllocationWatermark(page_top);
+ }
+ }
+
+ // Make sure the computed size - based on the used portion of the
+ // pages in use - matches the size we adjust during allocation.
+ ASSERT(computed_size == Size());
+}
+
+
+// Slow case for normal allocation. Try in order: (1) allocate in the next
+// page in the space, (2) allocate off the space's free list, (3) expand the
+// space, (4) fail.
+HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
+ ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+ // Linear allocation in this space has failed. If there is another page
+ // in the space, move to that page and allocate there. This allocation
+ // should succeed.
+ Page* current_page = TopPageOf(allocation_info_);
+ if (current_page->next_page()->is_valid()) {
+ return AllocateInNextPage(current_page, size_in_bytes);
+ }
+
+ // There is no next page in this space. Try free list allocation unless
+ // that is currently forbidden. The fixed space free list implicitly assumes
+ // that all free blocks are of the fixed size.
+ if (!heap()->linear_allocation()) {
+ Object* result;
+ MaybeObject* maybe = free_list_.Allocate();
+ if (maybe->ToObject(&result)) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ HeapObject* obj = HeapObject::cast(result);
+ Page* p = Page::FromAddress(obj->address());
+
+ if (obj->address() >= p->AllocationWatermark()) {
+ // There should be no hole between the allocation watermark
+ // and allocated object address.
+ // Memory above the allocation watermark was not swept and
+ // might contain garbage pointers to new space.
+ ASSERT(obj->address() == p->AllocationWatermark());
+ p->SetAllocationWatermark(obj->address() + size_in_bytes);
+ }
+
+ return obj;
+ }
+ }
+
+ // Free list allocation failed and there is no next page. Fail if we have
+ // hit the old generation size limit that should cause a garbage
+ // collection.
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
+ return NULL;
+ }
+
+ // Try to expand the space and allocate in the new next page.
+ ASSERT(!current_page->next_page()->is_valid());
+ if (Expand(current_page)) {
+ return AllocateInNextPage(current_page, size_in_bytes);
+ }
+
+ // Finally, fail.
+ return NULL;
+}
+
+
+// Move to the next page (there is assumed to be one) and allocate there.
+// The top of page block is always wasted, because it is too small to hold a
+// map.
+HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
+ int size_in_bytes) {
+ ASSERT(current_page->next_page()->is_valid());
+ ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
+ ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+ Page* next_page = current_page->next_page();
+ next_page->ClearGCFields();
+ current_page->SetAllocationWatermark(allocation_info_.top);
+ accounting_stats_.WasteBytes(page_extra_);
+ SetAllocationInfo(&allocation_info_, next_page);
+ return AllocateLinearly(&allocation_info_, size_in_bytes);
+}
+
+
+void FixedSpace::DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) {
+ // Free-list elements in fixed space are assumed to have a fixed size.
+ // We break the free block into chunks and add them to the free list
+ // individually.
+ int size = object_size_in_bytes();
+ ASSERT(size_in_bytes % size == 0);
+ Address end = start + size_in_bytes;
+ for (Address a = start; a < end; a += size) {
+ Free(a, add_to_freelist);
+ }
+}
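// --- Editor's illustrative sketch (not part of this diff) -----------------
// The loop above splits a freed block into fixed-size free-list entries.
// A minimal standalone version of that chunking, with the entry size and
// addresses as plain parameters (names here are hypothetical):
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<std::size_t> SplitIntoFixedChunks(std::size_t start,
                                              std::size_t size_in_bytes,
                                              std::size_t object_size) {
  // Same invariant as the ASSERT in FixedSpace::DeallocateBlock above.
  assert(size_in_bytes % object_size == 0);
  std::vector<std::size_t> chunk_starts;
  for (std::size_t a = start; a < start + size_in_bytes; a += object_size) {
    chunk_starts.push_back(a);  // Each chunk would be handed to Free(a, ...).
  }
  return chunk_starts;
}
// ---------------------------------------------------------------------------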
+
+
+#ifdef DEBUG
+void FixedSpace::ReportStatistics() {
+ int pct = static_cast<int>(Available() * 100 / Capacity());
+ PrintF(" capacity: %" V8_PTR_PREFIX "d"
+ ", waste: %" V8_PTR_PREFIX "d"
+ ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+ Capacity(), Waste(), Available(), pct);
+
+ ClearHistograms();
+ HeapObjectIterator obj_it(this);
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+ CollectHistogramInfo(obj);
+ ReportHistogram(false);
+}
+#endif
+
+
// -----------------------------------------------------------------------------
// MapSpace implementation
+void MapSpace::PrepareForMarkCompact(bool will_compact) {
+ // Call prepare of the super class.
+ FixedSpace::PrepareForMarkCompact(will_compact);
+
+ if (will_compact) {
+ // Initialize map index entry.
+ int page_count = 0;
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ while (it.has_next()) {
+ ASSERT_MAP_PAGE_INDEX(page_count);
+
+ Page* p = it.next();
+ ASSERT(p->mc_page_index == page_count);
+
+ page_addresses_[page_count++] = p->address();
+ }
+ }
+}
+
+
#ifdef DEBUG
void MapSpace::VerifyObject(HeapObject* object) {
// The object should be a map or a free-list node.
- ASSERT(object->IsMap() || object->IsFreeSpace());
+ ASSERT(object->IsMap() || object->IsByteArray());
}
#endif
@@ -2254,40 +2662,107 @@ void CellSpace::VerifyObject(HeapObject* object) {
// LargeObjectIterator
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
- current_ = space->first_page_;
+ current_ = space->first_chunk_;
size_func_ = NULL;
}
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
HeapObjectCallback size_func) {
- current_ = space->first_page_;
+ current_ = space->first_chunk_;
size_func_ = size_func;
}
-HeapObject* LargeObjectIterator::Next() {
+HeapObject* LargeObjectIterator::next() {
if (current_ == NULL) return NULL;
HeapObject* object = current_->GetObject();
- current_ = current_->next_page();
+ current_ = current_->next();
return object;
}
// -----------------------------------------------------------------------------
+// LargeObjectChunk
+
+LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
+ Executability executable) {
+ size_t requested = ChunkSizeFor(size_in_bytes);
+ size_t size;
+ size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+ Isolate* isolate = Isolate::Current();
+ void* mem = isolate->memory_allocator()->AllocateRawMemory(
+ requested + guard_size, &size, executable);
+ if (mem == NULL) return NULL;
+
+ // The start of the chunk may be overlaid with a page, so we have to
+ // make sure that the page flags fit in the size field.
+ ASSERT((size & Page::kPageFlagMask) == 0);
+
+ LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
+ if (size < requested + guard_size) {
+ isolate->memory_allocator()->FreeRawMemory(
+ mem, size, executable);
+ LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
+ return NULL;
+ }
+
+ if (guard_size != 0) {
+ OS::Guard(mem, guard_size);
+ size -= guard_size;
+ mem = static_cast<Address>(mem) + guard_size;
+ }
+
+ ObjectSpace space = (executable == EXECUTABLE)
+ ? kObjectSpaceCodeSpace
+ : kObjectSpaceLoSpace;
+ isolate->memory_allocator()->PerformAllocationCallback(
+ space, kAllocationActionAllocate, size);
+
+ LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
+ chunk->size_ = size;
+ chunk->GetPage()->heap_ = isolate->heap();
+ return chunk;
+}
+
+
+void LargeObjectChunk::Free(Executability executable) {
+ size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+ ObjectSpace space =
+ (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
+ // Do not access instance fields after FreeRawMemory!
+ Address my_address = address();
+ size_t my_size = size();
+ Isolate* isolate = GetPage()->heap_->isolate();
+ MemoryAllocator* a = isolate->memory_allocator();
+ a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable);
+ a->PerformAllocationCallback(space, kAllocationActionFree, my_size);
+ LOG(isolate, DeleteEvent("LargeObjectChunk", my_address));
+}
+
+
+int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
+ int os_alignment = static_cast<int>(OS::AllocateAlignment());
+ if (os_alignment < Page::kPageSize) {
+ size_in_bytes += (Page::kPageSize - os_alignment);
+ }
+ return size_in_bytes + Page::kObjectStartOffset;
+}
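// --- Editor's illustrative sketch (not part of this diff) -----------------
// ChunkSizeFor pads the request so that a page boundary can be found inside
// the raw allocation, then adds room for the page header. With an assumed
// 4K OS allocation alignment, an 8K page size, and a hypothetical header
// offset of 64 bytes, a 100000-byte request becomes
// 100000 + (8192 - 4096) + 64 = 104160 bytes.
int ChunkSizeForSketch(int size_in_bytes, int os_alignment,
                       int page_size, int object_start_offset) {
  if (os_alignment < page_size) {
    size_in_bytes += (page_size - os_alignment);
  }
  return size_in_bytes + object_start_offset;
}
// ---------------------------------------------------------------------------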
+
+// -----------------------------------------------------------------------------
// LargeObjectSpace
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
: Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
- first_page_(NULL),
+ first_chunk_(NULL),
size_(0),
page_count_(0),
objects_size_(0) {}
bool LargeObjectSpace::Setup() {
- first_page_ = NULL;
+ first_chunk_ = NULL;
size_ = 0;
page_count_ = 0;
objects_size_ = 0;
@@ -2296,22 +2771,20 @@ bool LargeObjectSpace::Setup() {
void LargeObjectSpace::TearDown() {
- while (first_page_ != NULL) {
- LargePage* page = first_page_;
- first_page_ = first_page_->next_page();
- LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
-
- ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
- heap()->isolate()->memory_allocator()->PerformAllocationCallback(
- space, kAllocationActionFree, page->size());
- heap()->isolate()->memory_allocator()->Free(page);
+ while (first_chunk_ != NULL) {
+ LargeObjectChunk* chunk = first_chunk_;
+ first_chunk_ = first_chunk_->next();
+ chunk->Free(chunk->GetPage()->PageExecutability());
}
Setup();
}
-MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
- Executability executable) {
+MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
+ int object_size,
+ Executability executable) {
+ ASSERT(0 < object_size && object_size <= requested_size);
+
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->always_allocate() &&
@@ -2319,42 +2792,75 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
return Failure::RetryAfterGC(identity());
}
- LargePage* page = heap()->isolate()->memory_allocator()->
- AllocateLargePage(object_size, executable, this);
- if (page == NULL) return Failure::RetryAfterGC(identity());
- ASSERT(page->body_size() >= object_size);
+ LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
+ if (chunk == NULL) {
+ return Failure::RetryAfterGC(identity());
+ }
- size_ += static_cast<int>(page->size());
- objects_size_ += object_size;
+ size_ += static_cast<int>(chunk->size());
+ objects_size_ += requested_size;
page_count_++;
- page->set_next_page(first_page_);
- first_page_ = page;
+ chunk->set_next(first_chunk_);
+ first_chunk_ = chunk;
+
+ // Initialize page header.
+ Page* page = chunk->GetPage();
+ Address object_address = page->ObjectAreaStart();
+
+ // Clear the low order bit of the second word in the page to flag it as a
+ // large object page. If the chunk_size happened to be written there, its
+ // low order bit should already be clear.
+ page->SetIsLargeObjectPage(true);
+ page->SetPageExecutability(executable);
+ page->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ return HeapObject::FromAddress(object_address);
+}
+
+
+MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
+ ASSERT(0 < size_in_bytes);
+ return AllocateRawInternal(size_in_bytes,
+ size_in_bytes,
+ EXECUTABLE);
+}
+
- heap()->incremental_marking()->OldSpaceStep(object_size);
- return page->GetObject();
+MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
+ ASSERT(0 < size_in_bytes);
+ return AllocateRawInternal(size_in_bytes,
+ size_in_bytes,
+ NOT_EXECUTABLE);
+}
+
+
+MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
+ ASSERT(0 < size_in_bytes);
+ return AllocateRawInternal(size_in_bytes,
+ size_in_bytes,
+ NOT_EXECUTABLE);
}
// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
- for (LargePage* page = first_page_;
- page != NULL;
- page = page->next_page()) {
- Address page_address = page->address();
- if (page_address <= a && a < page_address + page->size()) {
- return page->GetObject();
+ for (LargeObjectChunk* chunk = first_chunk_;
+ chunk != NULL;
+ chunk = chunk->next()) {
+ Address chunk_address = chunk->address();
+ if (chunk_address <= a && a < chunk_address + chunk->size()) {
+ return chunk->GetObject();
}
}
return Failure::Exception();
}
-LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) {
+LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
// TODO(853): Change this implementation to only find executable
// chunks and use some kind of hash-based approach to speed it up.
- for (LargePage* chunk = first_page_;
+ for (LargeObjectChunk* chunk = first_chunk_;
chunk != NULL;
- chunk = chunk->next_page()) {
+ chunk = chunk->next()) {
Address chunk_address = chunk->address();
if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
return chunk;
@@ -2364,57 +2870,112 @@ LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) {
}
+void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
+ LargeObjectIterator it(this);
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+ // We only have code, sequential strings, or fixed arrays in large
+ // object space, and only fixed arrays can possibly contain pointers to
+ // the young generation.
+ if (object->IsFixedArray()) {
+ Page* page = Page::FromAddress(object->address());
+ uint32_t marks = page->GetRegionMarks();
+ uint32_t newmarks = Page::kAllRegionsCleanMarks;
+
+ if (marks != Page::kAllRegionsCleanMarks) {
+ // For a large page a single dirty mark corresponds to several
+ // regions (modulo 32). So we treat a large page as a sequence of
+ // normal pages of size Page::kPageSize having the same dirty marks
+ // and subsequently iterate dirty regions on each of these pages.
+ Address start = object->address();
+ Address end = page->ObjectAreaEnd();
+ Address object_end = start + object->Size();
+
+ // Iterate regions of the first normal page covering object.
+ uint32_t first_region_number = page->GetRegionNumberForAddress(start);
+ newmarks |=
+ heap()->IterateDirtyRegions(marks >> first_region_number,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object) << first_region_number;
+
+ start = end;
+ end = start + Page::kPageSize;
+ while (end <= object_end) {
+ // Iterate next 32 regions.
+ newmarks |=
+ heap()->IterateDirtyRegions(marks,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
+ start = end;
+ end = start + Page::kPageSize;
+ }
+
+ if (start != object_end) {
+ // Iterate the last piece of an object which is less than
+ // Page::kPageSize.
+ newmarks |=
+ heap()->IterateDirtyRegions(marks,
+ start,
+ object_end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
+ }
+
+ page->SetRegionMarks(newmarks);
+ }
+ }
+ }
+}
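// --- Editor's illustrative sketch (not part of this diff) -----------------
// Simplified view of the slicing performed above: a large object is walked
// in Page::kPageSize (8K) windows and the same 32 region marks are reused
// for every window after the first. page_size must be a power of two.
#include <cstdint>

int CountDirtyRegionSlices(uintptr_t object_start, uintptr_t object_end,
                           uintptr_t page_size) {
  int slices = 1;  // First slice: from the object start to the next boundary.
  uintptr_t end = (object_start & ~(page_size - 1)) + page_size;
  while (end < object_end) {
    ++slices;  // Each further 8K window is scanned with the same marks.
    end += page_size;
  }
  return slices;
}
// ---------------------------------------------------------------------------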
+
+
void LargeObjectSpace::FreeUnmarkedObjects() {
- LargePage* previous = NULL;
- LargePage* current = first_page_;
+ LargeObjectChunk* previous = NULL;
+ LargeObjectChunk* current = first_chunk_;
while (current != NULL) {
HeapObject* object = current->GetObject();
- // Can this large page contain pointers to non-trivial objects. No other
- // pointer object is this big.
- bool is_pointer_object = object->IsFixedArray();
- MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (mark_bit.Get()) {
- mark_bit.Clear();
- MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
+ if (object->IsMarked()) {
+ object->ClearMark();
+ heap()->mark_compact_collector()->tracer()->decrement_marked_count();
previous = current;
- current = current->next_page();
+ current = current->next();
} else {
- LargePage* page = current;
// Cut the chunk out from the chunk list.
- current = current->next_page();
+ LargeObjectChunk* current_chunk = current;
+ current = current->next();
if (previous == NULL) {
- first_page_ = current;
+ first_chunk_ = current;
} else {
- previous->set_next_page(current);
+ previous->set_next(current);
}
// Free the chunk.
heap()->mark_compact_collector()->ReportDeleteIfNeeded(
object, heap()->isolate());
- size_ -= static_cast<int>(page->size());
+ LiveObjectList::ProcessNonLive(object);
+
+ size_ -= static_cast<int>(current_chunk->size());
objects_size_ -= object->Size();
page_count_--;
-
- if (is_pointer_object) {
- heap()->QueueMemoryChunkForFree(page);
- } else {
- heap()->isolate()->memory_allocator()->Free(page);
- }
+ current_chunk->Free(current_chunk->GetPage()->PageExecutability());
}
}
- heap()->FreeQueuedChunks();
}
bool LargeObjectSpace::Contains(HeapObject* object) {
Address address = object->address();
- MemoryChunk* chunk = MemoryChunk::FromAddress(address);
-
- bool owned = (chunk->owner() == this);
+ if (heap()->new_space()->Contains(address)) {
+ return false;
+ }
+ Page* page = Page::FromAddress(address);
- SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
+ SLOW_ASSERT(!page->IsLargeObjectPage()
+ || !FindObject(address)->IsFailure());
- return owned;
+ return page->IsLargeObjectPage();
}
@@ -2422,9 +2983,9 @@ bool LargeObjectSpace::Contains(HeapObject* object) {
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
- for (LargePage* chunk = first_page_;
+ for (LargeObjectChunk* chunk = first_chunk_;
chunk != NULL;
- chunk = chunk->next_page()) {
+ chunk = chunk->next()) {
// Each chunk contains an object that starts at the large object page's
// object area start.
HeapObject* object = chunk->GetObject();
@@ -2454,6 +3015,9 @@ void LargeObjectSpace::Verify() {
object->Size(),
&code_visitor);
} else if (object->IsFixedArray()) {
+ // We loop over fixed arrays ourselves, rather than using the visitor,
+ // because the visitor doesn't support the start/offset iteration
+ // needed for IsRegionDirty.
FixedArray* array = FixedArray::cast(object);
for (int j = 0; j < array->length(); j++) {
Object* element = array->get(j);
@@ -2461,6 +3025,13 @@ void LargeObjectSpace::Verify() {
HeapObject* element_object = HeapObject::cast(element);
ASSERT(heap()->Contains(element_object));
ASSERT(element_object->map()->IsMap());
+ if (heap()->InNewSpace(element_object)) {
+ Address array_addr = object->address();
+ Address element_addr = array_addr + FixedArray::kHeaderSize +
+ j * kPointerSize;
+
+ ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
+ }
}
}
}
@@ -2470,7 +3041,7 @@ void LargeObjectSpace::Verify() {
void LargeObjectSpace::Print() {
LargeObjectIterator it(this);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
obj->Print();
}
}
@@ -2481,7 +3052,7 @@ void LargeObjectSpace::ReportStatistics() {
int num_objects = 0;
ClearHistograms();
LargeObjectIterator it(this);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
num_objects++;
CollectHistogramInfo(obj);
}
@@ -2495,38 +3066,13 @@ void LargeObjectSpace::ReportStatistics() {
void LargeObjectSpace::CollectCodeStatistics() {
Isolate* isolate = heap()->isolate();
LargeObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
isolate->code_kind_statistics()[code->kind()] += code->Size();
}
}
}
-
-
-void Page::Print() {
- // Make a best-effort to print the objects in the page.
- PrintF("Page@%p in %s\n",
- this->address(),
- AllocationSpaceName(this->owner()->identity()));
- printf(" --------------------------------------\n");
- HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
- unsigned mark_size = 0;
- for (HeapObject* object = objects.Next();
- object != NULL;
- object = objects.Next()) {
- bool is_marked = Marking::MarkBitFrom(object).Get();
- PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
- if (is_marked) {
- mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
- }
- object->ShortPrint();
- PrintF("\n");
- }
- printf(" --------------------------------------\n");
- printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
-}
-
#endif // DEBUG
} } // namespace v8::internal
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index ce8e382aaa..f1564967e1 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -49,47 +49,45 @@ class Isolate;
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
-// area.
+// area. The page size is deliberately chosen to be 8K bytes.
+// The first word of a page is an opaque page header that has the
+// address of the next page and its ownership information. The second word may
+// have the allocation top address of this page. Heap objects are aligned to the
+// pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
-// may be larger than the page size.
+// may be larger than 8K.
//
-// A store-buffer based write barrier is used to keep track of intergenerational
-// references. See store-buffer.h.
+// A card marking write barrier is used to keep track of intergenerational
+// references. Old space pages are divided into regions of Page::kRegionSize
+// size. Each region has a corresponding dirty bit in the page header which is
+// set if the region might contain pointers to new space. For details about
+// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
+// method body.
//
-// During scavenges and mark-sweep collections we sometimes (after a store
-// buffer overflow) iterate intergenerational pointers without decoding heap
-// object maps so if the page belongs to old pointer space or large object
-// space it is essential to guarantee that the page does not contain any
-// garbage pointers to new space: every pointer aligned word which satisfies
-// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
-// new space. Thus objects in old pointer and large object spaces should have a
-// special layout (e.g. no bare integer fields). This requirement does not
-// apply to map space which is iterated in a special fashion. However we still
-// require pointer fields of dead maps to be cleaned.
+// During scavenges and mark-sweep collections we iterate intergenerational
+// pointers without decoding heap object maps so if the page belongs to old
+// pointer space or large object space it is essential to guarantee that
+// the page does not contain any garbage pointers to new space: every pointer
+// aligned word which satisfies the Heap::InNewSpace() predicate must be a
+// pointer to a live heap object in new space. Thus objects in old pointer
+// and large object spaces should have a special layout (e.g. no bare integer
+// fields). This requirement does not apply to map space which is iterated in
+// a special fashion. However we still require pointer fields of dead maps to
+// be cleaned.
//
-// To enable lazy cleaning of old space pages we can mark chunks of the page
-// as being garbage. Garbage sections are marked with a special map. These
-// sections are skipped when scanning the page, even if we are otherwise
-// scanning without regard for object boundaries. Garbage sections are chained
-// together to form a free list after a GC. Garbage sections created outside
-// of GCs by object trunctation etc. may not be in the free list chain. Very
-// small free spaces are ignored, they need only be cleaned of bogus pointers
-// into new space.
+// To enable lazy cleaning of old space pages we use a notion of allocation
+// watermark. Every pointer under watermark is considered to be well formed.
+// Page allocation watermark is not necessarily equal to page allocation top but
+// all alive objects on page should reside under allocation watermark.
+// During scavenge allocation watermark might be bumped and invalid pointers
+// might appear below it. To avoid following them we store a valid watermark
+// into special field in the page header and set a page WATERMARK_INVALIDATED
+// flag. For details see comments in the Page::SetAllocationWatermark() method
+// body.
//
-// Each page may have up to one special garbage section. The start of this
-// section is denoted by the top field in the space. The end of the section
-// is denoted by the limit field in the space. This special garbage section
-// is not marked with a free space map in the data. The point of this section
-// is to enable linear allocation without having to constantly update the byte
-// array every time the top field is updated and a new object is created. The
-// special garbage section is not in the chain of garbage sections.
-//
-// Since the top and limit fields are in the space, not the page, only one page
-// has a special garbage section, and if the top and limit are equal then there
-// is no special garbage section.
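// --- Editor's illustrative sketch (not part of this diff) -----------------
// The card-marking scheme above divides each 8K page into 256-byte regions,
// one dirty bit per region in a 32-bit word. A standalone version of the
// region arithmetic (assuming kPageSizeBits == 13 and kRegionSizeLog2 == 8
// as declared later in this header; not the original accessors):
#include <cstdint>

const int kSketchPageSizeBits = 13;     // 8K pages.
const int kSketchRegionSizeLog2 = 8;    // 256-byte regions, 32 per page.
const uintptr_t kSketchPageAlignmentMask =
    (uintptr_t(1) << kSketchPageSizeBits) - 1;

int RegionNumberForAddress(uintptr_t addr) {
  // Offset of the address within its page, divided by the region size.
  return static_cast<int>((addr & kSketchPageAlignmentMask) >>
                          kSketchRegionSizeLog2);
}

uint32_t RegionMaskForAddress(uintptr_t addr) {
  return uint32_t(1) << RegionNumberForAddress(addr);
}
// ---------------------------------------------------------------------------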
// Some assertion macros used in the debugging mode.
@@ -116,522 +114,30 @@ class Isolate;
class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
-class Space;
-class FreeList;
-class MemoryChunk;
-
-class MarkBit {
- public:
- typedef uint32_t CellType;
-
- inline MarkBit(CellType* cell, CellType mask, bool data_only)
- : cell_(cell), mask_(mask), data_only_(data_only) { }
-
- inline CellType* cell() { return cell_; }
- inline CellType mask() { return mask_; }
-
-#ifdef DEBUG
- bool operator==(const MarkBit& other) {
- return cell_ == other.cell_ && mask_ == other.mask_;
- }
-#endif
-
- inline void Set() { *cell_ |= mask_; }
- inline bool Get() { return (*cell_ & mask_) != 0; }
- inline void Clear() { *cell_ &= ~mask_; }
-
- inline bool data_only() { return data_only_; }
-
- inline MarkBit Next() {
- CellType new_mask = mask_ << 1;
- if (new_mask == 0) {
- return MarkBit(cell_ + 1, 1, data_only_);
- } else {
- return MarkBit(cell_, new_mask, data_only_);
- }
- }
-
- private:
- CellType* cell_;
- CellType mask_;
- // This boolean indicates that the object is in a data-only space with no
- // pointers. This enables some optimizations when marking.
- // It is expected that this field is inlined and turned into control flow
- // at the place where the MarkBit object is created.
- bool data_only_;
-};
-
-
-// Bitmap is a sequence of cells each containing fixed number of bits.
-class Bitmap {
- public:
- static const uint32_t kBitsPerCell = 32;
- static const uint32_t kBitsPerCellLog2 = 5;
- static const uint32_t kBitIndexMask = kBitsPerCell - 1;
- static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
- static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
-
- static const size_t kLength =
- (1 << kPageSizeBits) >> (kPointerSizeLog2);
-
- static const size_t kSize =
- (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
-
-
- static int CellsForLength(int length) {
- return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
- }
-
- int CellsCount() {
- return CellsForLength(kLength);
- }
-
- static int SizeFor(int cells_count) {
- return sizeof(MarkBit::CellType) * cells_count;
- }
-
- INLINE(static uint32_t IndexToCell(uint32_t index)) {
- return index >> kBitsPerCellLog2;
- }
-
- INLINE(static uint32_t CellToIndex(uint32_t index)) {
- return index << kBitsPerCellLog2;
- }
-
- INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
- return (index + kBitIndexMask) & ~kBitIndexMask;
- }
-
- INLINE(MarkBit::CellType* cells()) {
- return reinterpret_cast<MarkBit::CellType*>(this);
- }
-
- INLINE(Address address()) {
- return reinterpret_cast<Address>(this);
- }
-
- INLINE(static Bitmap* FromAddress(Address addr)) {
- return reinterpret_cast<Bitmap*>(addr);
- }
-
- inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
- MarkBit::CellType mask = 1 << (index & kBitIndexMask);
- MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
- return MarkBit(cell, mask, data_only);
- }
-
- static inline void Clear(MemoryChunk* chunk);
-
- static void PrintWord(uint32_t word, uint32_t himask = 0) {
- for (uint32_t mask = 1; mask != 0; mask <<= 1) {
- if ((mask & himask) != 0) PrintF("[");
- PrintF((mask & word) ? "1" : "0");
- if ((mask & himask) != 0) PrintF("]");
- }
- }
-
- class CellPrinter {
- public:
- CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
-
- void Print(uint32_t pos, uint32_t cell) {
- if (cell == seq_type) {
- seq_length++;
- return;
- }
-
- Flush();
-
- if (IsSeq(cell)) {
- seq_start = pos;
- seq_length = 0;
- seq_type = cell;
- return;
- }
-
- PrintF("%d: ", pos);
- PrintWord(cell);
- PrintF("\n");
- }
-
- void Flush() {
- if (seq_length > 0) {
- PrintF("%d: %dx%d\n",
- seq_start,
- seq_type == 0 ? 0 : 1,
- seq_length * kBitsPerCell);
- seq_length = 0;
- }
- }
-
- static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
-
- private:
- uint32_t seq_start;
- uint32_t seq_type;
- uint32_t seq_length;
- };
-
- void Print() {
- CellPrinter printer;
- for (int i = 0; i < CellsCount(); i++) {
- printer.Print(i, cells()[i]);
- }
- printer.Flush();
- PrintF("\n");
- }
-
- bool IsClean() {
- for (int i = 0; i < CellsCount(); i++) {
- if (cells()[i] != 0) return false;
- }
- return true;
- }
-};
-
-
-class SkipList;
-class SlotsBuffer;
-
-// MemoryChunk represents a memory region owned by a specific space.
-// It is divided into the header and the body. Chunk start is always
-// 1MB aligned. Start of the body is aligned so it can accomodate
-// any heap object.
-class MemoryChunk {
- public:
- // Only works if the pointer is in the first kPageSize of the MemoryChunk.
- static MemoryChunk* FromAddress(Address a) {
- return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
- }
-
- // Only works for addresses in pointer spaces, not data or code spaces.
- static inline MemoryChunk* FromAnyPointerAddress(Address addr);
-
- Address address() { return reinterpret_cast<Address>(this); }
-
- bool is_valid() { return address() != NULL; }
-
- MemoryChunk* next_chunk() const { return next_chunk_; }
- MemoryChunk* prev_chunk() const { return prev_chunk_; }
-
- void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
- void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
-
- Space* owner() const {
- if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
- kFailureTag) {
- return reinterpret_cast<Space*>(owner_ - kFailureTag);
- } else {
- return NULL;
- }
- }
-
- void set_owner(Space* space) {
- ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
- owner_ = reinterpret_cast<Address>(space) + kFailureTag;
- ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
- kFailureTag);
- }
-
- VirtualMemory* reserved_memory() {
- return &reservation_;
- }
-
- void InitializeReservedMemory() {
- reservation_.Reset();
- }
-
- void set_reserved_memory(VirtualMemory* reservation) {
- ASSERT_NOT_NULL(reservation);
- reservation_.TakeControl(reservation);
- }
-
- bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
- void initialize_scan_on_scavenge(bool scan) {
- if (scan) {
- SetFlag(SCAN_ON_SCAVENGE);
- } else {
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- }
- inline void set_scan_on_scavenge(bool scan);
-
- int store_buffer_counter() { return store_buffer_counter_; }
- void set_store_buffer_counter(int counter) {
- store_buffer_counter_ = counter;
- }
-
- Address body() { return address() + kObjectStartOffset; }
-
- Address body_limit() { return address() + size(); }
-
- int body_size() { return static_cast<int>(size() - kObjectStartOffset); }
-
- bool Contains(Address addr) {
- return addr >= body() && addr < address() + size();
- }
-
- // Checks whether addr can be a limit of addresses in this page.
- // It's a limit if it's in the page, or if it's just after the
- // last byte of the page.
- bool ContainsLimit(Address addr) {
- return addr >= body() && addr <= address() + size();
- }
-
- enum MemoryChunkFlags {
- IS_EXECUTABLE,
- ABOUT_TO_BE_FREED,
- POINTERS_TO_HERE_ARE_INTERESTING,
- POINTERS_FROM_HERE_ARE_INTERESTING,
- SCAN_ON_SCAVENGE,
- IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
- IN_TO_SPACE, // All pages in new space has one of these two set.
- NEW_SPACE_BELOW_AGE_MARK,
- CONTAINS_ONLY_DATA,
- EVACUATION_CANDIDATE,
- RESCAN_ON_EVACUATION,
-
- // Pages swept precisely can be iterated, hitting only the live objects.
- // Whereas those swept conservatively cannot be iterated over. Both flags
- // indicate that marking bits have been cleared by the sweeper, otherwise
- // marking bits are still intact.
- WAS_SWEPT_PRECISELY,
- WAS_SWEPT_CONSERVATIVELY,
-
- // Last flag, keep at bottom.
- NUM_MEMORY_CHUNK_FLAGS
- };
-
-
- static const int kPointersToHereAreInterestingMask =
- 1 << POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const int kPointersFromHereAreInterestingMask =
- 1 << POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const int kEvacuationCandidateMask =
- 1 << EVACUATION_CANDIDATE;
-
- static const int kSkipEvacuationSlotsRecordingMask =
- (1 << EVACUATION_CANDIDATE) |
- (1 << RESCAN_ON_EVACUATION) |
- (1 << IN_FROM_SPACE) |
- (1 << IN_TO_SPACE);
-
-
- void SetFlag(int flag) {
- flags_ |= static_cast<uintptr_t>(1) << flag;
- }
-
- void ClearFlag(int flag) {
- flags_ &= ~(static_cast<uintptr_t>(1) << flag);
- }
-
- void SetFlagTo(int flag, bool value) {
- if (value) {
- SetFlag(flag);
- } else {
- ClearFlag(flag);
- }
- }
-
- bool IsFlagSet(int flag) {
- return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
- }
-
- // Set or clear multiple flags at a time. The flags in the mask
- // are set to the value in "flags", the rest retain the current value
- // in flags_.
- void SetFlags(intptr_t flags, intptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
- }
-
- // Return all current flags.
- intptr_t GetFlags() { return flags_; }
-
- // Manage live byte count (count of bytes known to be live,
- // because they are marked black).
- void ResetLiveBytes() {
- if (FLAG_gc_verbose) {
- PrintF("ResetLiveBytes:%p:%x->0\n",
- static_cast<void*>(this), live_byte_count_);
- }
- live_byte_count_ = 0;
- }
- void IncrementLiveBytes(int by) {
- ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
- if (FLAG_gc_verbose) {
- printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
- static_cast<void*>(this), live_byte_count_,
- ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
- live_byte_count_ + by);
- }
- live_byte_count_ += by;
- ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
- }
- int LiveBytes() {
- ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
- return live_byte_count_;
- }
- static void IncrementLiveBytes(Address address, int by) {
- MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
- }
-
- static const intptr_t kAlignment =
- (static_cast<uintptr_t>(1) << kPageSizeBits);
-
- static const intptr_t kAlignmentMask = kAlignment - 1;
-
- static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
-
- static const intptr_t kLiveBytesOffset =
- kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
- kPointerSize + kPointerSize + kPointerSize + kIntSize;
-
- static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
-
- static const size_t kHeaderSize =
- kSlotsBufferOffset + kPointerSize + kPointerSize;
-
- static const int kBodyOffset =
- CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
-
- // The start offset of the object area in a page. Aligned to both maps and
- // code alignment to be suitable for both. Also aligned to 32 words because
- // the marking bitmap is arranged in 32 bit chunks.
- static const int kObjectStartAlignment = 32 * kPointerSize;
- static const int kObjectStartOffset = kBodyOffset - 1 +
- (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
-
- size_t size() const { return size_; }
-
- Executability executable() {
- return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
- }
-
- bool ContainsOnlyData() {
- return IsFlagSet(CONTAINS_ONLY_DATA);
- }
-
- bool InNewSpace() {
- return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
- }
-
- bool InToSpace() {
- return IsFlagSet(IN_TO_SPACE);
- }
-
- bool InFromSpace() {
- return IsFlagSet(IN_FROM_SPACE);
- }
-
- // ---------------------------------------------------------------------
- // Markbits support
-
- inline Bitmap* markbits() {
- return Bitmap::FromAddress(address() + kHeaderSize);
- }
-
- void PrintMarkbits() { markbits()->Print(); }
-
- inline uint32_t AddressToMarkbitIndex(Address addr) {
- return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
- }
-
- inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
- const intptr_t offset =
- reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
-
- return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
- }
-
- inline Address MarkbitIndexToAddress(uint32_t index) {
- return this->address() + (index << kPointerSizeLog2);
- }
-
- void InsertAfter(MemoryChunk* other);
- void Unlink();
-
- inline Heap* heap() { return heap_; }
-
- static const int kFlagsOffset = kPointerSize * 3;
-
- bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
-
- bool ShouldSkipEvacuationSlotRecording() {
- return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
- }
-
- inline SkipList* skip_list() {
- return skip_list_;
- }
-
- inline void set_skip_list(SkipList* skip_list) {
- skip_list_ = skip_list;
- }
-
- inline SlotsBuffer* slots_buffer() {
- return slots_buffer_;
- }
-
- inline SlotsBuffer** slots_buffer_address() {
- return &slots_buffer_;
- }
-
- void MarkEvacuationCandidate() {
- ASSERT(slots_buffer_ == NULL);
- SetFlag(EVACUATION_CANDIDATE);
- }
-
- void ClearEvacuationCandidate() {
- ASSERT(slots_buffer_ == NULL);
- ClearFlag(EVACUATION_CANDIDATE);
- }
-
-
- protected:
- MemoryChunk* next_chunk_;
- MemoryChunk* prev_chunk_;
- size_t size_;
- intptr_t flags_;
- // If the chunk needs to remember its memory reservation, it is stored here.
- VirtualMemory reservation_;
- // The identity of the owning space. This is tagged as a failure pointer, but
- // no failure can be in an object, so this can be distinguished from any entry
- // in a fixed array.
- Address owner_;
- Heap* heap_;
- // Used by the store buffer to keep track of which pages to mark scan-on-
- // scavenge.
- int store_buffer_counter_;
- // Count of bytes marked black on page.
- int live_byte_count_;
- SlotsBuffer* slots_buffer_;
- SkipList* skip_list_;
-
- static MemoryChunk* Initialize(Heap* heap,
- Address base,
- size_t size,
- Executability executable,
- Space* owner);
-
- friend class MemoryAllocator;
-};
-
-STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
// -----------------------------------------------------------------------------
-// A page is a memory chunk of a size 1MB. Large object pages may be larger.
+// A page normally has 8K bytes. Large object pages may be larger. A page
+// address is always aligned to the 8K page size.
+//
+// Each page starts with a header of Page::kPageHeaderSize size which contains
+// bookkeeping data.
+//
+// The mark-compact collector transforms a map pointer into a page index and a
+// page offset. The exact encoding is described in the comments for
+// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
// Page* p = Page::FromAllocationTop(top);
-class Page : public MemoryChunk {
+class Page {
public:
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[
- // This only works if the object is in fact in a page. See also MemoryChunk::
- // FromAddress() and FromAnyAddress().
+ //
+ // Note that this function only works for addresses in normal paged
+ // spaces and addresses in the first 8K of large object pages (i.e.,
+ // the start of large objects but not necessarily derived pointers
+ // within them).
INLINE(static Page* FromAddress(Address a)) {
return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
}
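// --- Editor's illustrative sketch (not part of this diff) -----------------
// Worked example of the masking in FromAddress above, assuming the 8K page
// size described in this header (kPageSizeBits == 13): stripping the low
// 13 bits of 0x12345678 yields the page start 0x12344000.
#include <cstdint>

inline uintptr_t PageStartFor(uintptr_t addr) {
  const uintptr_t kSketchPageAlignmentMask = (uintptr_t(1) << 13) - 1;
  return addr & ~kSketchPageAlignmentMask;
}
// Usage: PageStartFor(0x12345678u) == 0x12344000u.
// ---------------------------------------------------------------------------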
@@ -646,11 +152,30 @@ class Page : public MemoryChunk {
return p;
}
- // Returns the next page in the chain of pages owned by a space.
+ // Returns the start address of this page.
+ Address address() { return reinterpret_cast<Address>(this); }
+
+ // Checks whether this is a valid page address.
+ bool is_valid() { return address() != NULL; }
+
+ // Returns the next page of this page.
inline Page* next_page();
- inline Page* prev_page();
- inline void set_next_page(Page* page);
- inline void set_prev_page(Page* page);
+
+ // Return the end of allocation in this page. Undefined for unused pages.
+ inline Address AllocationTop();
+
+ // Return the allocation watermark for the page.
+ // For old space pages it is guaranteed that the area under the watermark
+ // does not contain any garbage pointers to new space.
+ inline Address AllocationWatermark();
+
+ // Return the allocation watermark offset from the beginning of the page.
+ inline uint32_t AllocationWatermarkOffset();
+
+ inline void SetAllocationWatermark(Address allocation_watermark);
+
+ inline void SetCachedAllocationWatermark(Address allocation_watermark);
+ inline Address CachedAllocationWatermark();
// Returns the start address of the object area in this page.
Address ObjectAreaStart() { return address() + kObjectStartOffset; }
@@ -663,6 +188,22 @@ class Page : public MemoryChunk {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
}
+ // True if this page was in use before current compaction started.
+ // Result is valid only for pages owned by paged spaces and
+ // only after PagedSpace::PrepareForMarkCompact was called.
+ inline bool WasInUseBeforeMC();
+
+ inline void SetWasInUseBeforeMC(bool was_in_use);
+
+ // True if this page is a large object page.
+ inline bool IsLargeObjectPage();
+
+ inline void SetIsLargeObjectPage(bool is_large_object_page);
+
+ inline Executability PageExecutability();
+
+ inline void SetPageExecutability(Executability executable);
+
// Returns the offset of a given address to this page.
INLINE(int Offset(Address a)) {
int offset = static_cast<int>(a - address());
@@ -677,6 +218,24 @@ class Page : public MemoryChunk {
}
// ---------------------------------------------------------------------
+ // Card marking support
+
+ static const uint32_t kAllRegionsCleanMarks = 0x0;
+ static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
+
+ inline uint32_t GetRegionMarks();
+ inline void SetRegionMarks(uint32_t dirty);
+
+ inline uint32_t GetRegionMaskForAddress(Address addr);
+ inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
+ inline int GetRegionNumberForAddress(Address addr);
+
+ inline void MarkRegionDirty(Address addr);
+ inline bool IsRegionDirty(Address addr);
+
+ inline void ClearRegionMarks(Address start,
+ Address end,
+ bool reaches_limit);
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@@ -684,69 +243,118 @@ class Page : public MemoryChunk {
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+ static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
+ kIntSize + kPointerSize + kPointerSize;
+
+ // The start offset of the object area in a page. Aligned to both maps and
+ // code alignment to be suitable for both.
+ static const int kObjectStartOffset =
+ CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
+
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
- static const int kFirstUsedCell =
- (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2;
-
- static const int kLastUsedCell =
- ((kPageSize - kPointerSize)/kPointerSize) >>
- Bitmap::kBitsPerCellLog2;
-
- inline void ClearGCFields();
-
- static inline Page* Initialize(Heap* heap,
- MemoryChunk* chunk,
- Executability executable,
- PagedSpace* owner);
-
- void InitializeAsAnchor(PagedSpace* owner);
-
- bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
- bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
- bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
+ static const int kDirtyFlagOffset = 2 * kPointerSize;
+ static const int kRegionSizeLog2 = 8;
+ static const int kRegionSize = 1 << kRegionSizeLog2;
+ static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
- void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
- void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
+ STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
- void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
- void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
+ enum PageFlag {
+ IS_NORMAL_PAGE = 0,
+ WAS_IN_USE_BEFORE_MC,
-#ifdef DEBUG
- void Print();
-#endif // DEBUG
+ // Page allocation watermark was bumped by preallocation during scavenge.
+ // The correct watermark can be retrieved via CachedAllocationWatermark().
+ WATERMARK_INVALIDATED,
+ IS_EXECUTABLE,
+ NUM_PAGE_FLAGS // Must be last
+ };
+ static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
+
+ // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
+ // scavenge we just invalidate the watermark on each old space page after
+ // processing it. At the beginning of the next scavenge we then flip the
+ // meaning of the WATERMARK_INVALIDATED flag, so every page in use is again
+ // marked as having a valid watermark.
+ //
+ // The following invariant must hold for pages in old pointer and map spaces:
+ // if a page is in use, then it is marked as having an invalid watermark at
+ // the beginning and at the end of any GC.
+ //
+ // This invariant guarantees that after flipping the flag meaning at the
+ // beginning of a scavenge all pages in use will be marked as having a valid
+ // watermark.
+ static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
+
+ // Returns true if the page allocation watermark was not altered during
+ // scavenge.
+ inline bool IsWatermarkValid();
- friend class MemoryAllocator;
-};
+ inline void InvalidateWatermark(bool value);
+ inline bool GetPageFlag(PageFlag flag);
+ inline void SetPageFlag(PageFlag flag, bool value);
+ inline void ClearPageFlags();
-STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
+ inline void ClearGCFields();
+ static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
+ static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
+ static const uint32_t kAllocationWatermarkOffsetMask =
+ ((1 << kAllocationWatermarkOffsetBits) - 1) <<
+ kAllocationWatermarkOffsetShift;
+
+ static const uint32_t kFlagsMask =
+ ((1 << kAllocationWatermarkOffsetShift) - 1);
+
+ STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
+ kAllocationWatermarkOffsetBits);
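Per the constants above, one 32-bit word carries the page flags in its low bits and the allocation-watermark offset (kPageSizeBits + 1 bits wide) above them. A simplified sketch of that packing; the shift value 3 is WATERMARK_INVALIDATED + 1 from the enum above, and the helper functions are illustrative, not the real Page methods:

    #include <cassert>
    #include <cstdint>

    const int kPageSizeBits = 13;                    // 8K pages
    const int kAllocationWatermarkOffsetShift = 3;   // WATERMARK_INVALIDATED + 1
    const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
    const uint32_t kAllocationWatermarkOffsetMask =
        ((uint32_t(1) << kAllocationWatermarkOffsetBits) - 1)
            << kAllocationWatermarkOffsetShift;
    const uint32_t kFlagsMask =
        (uint32_t(1) << kAllocationWatermarkOffsetShift) - 1;

    // Replace the offset bits while leaving the flag bits untouched.
    uint32_t SetWatermarkOffset(uint32_t flags, uint32_t offset) {
      return (flags & ~kAllocationWatermarkOffsetMask) |
             ((offset << kAllocationWatermarkOffsetShift) &
              kAllocationWatermarkOffsetMask);
    }

    uint32_t WatermarkOffset(uint32_t flags) {
      return (flags & kAllocationWatermarkOffsetMask) >>
             kAllocationWatermarkOffsetShift;
    }

    int main() {
      uint32_t flags = 0x5;                      // some flag bits set
      flags = SetWatermarkOffset(flags, 4096);   // watermark 4K into the page
      assert(WatermarkOffset(flags) == 4096);
      assert((flags & kFlagsMask) == 0x5);       // flag bits survive the update
      return 0;
    }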
+
+ //---------------------------------------------------------------------------
+ // Page header description.
+ //
+ // If a page is not in the large object space, the first word,
+ // opaque_header, encodes the next page address (aligned to kPageSize 8K)
+ // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
+ // opaque_header. The value range of the opaque_header is [0..kPageSize[,
+ // or [next_page_start, next_page_end[. It cannot point to a valid address
+ // in the current page. If a page is in the large object space, the first
+ // word *may* (if the page start and large object chunk start are the
+ // same) contain the address of the next large object chunk.
+ intptr_t opaque_header;
+
+ // If the page is not in the large object space, the low-order bit of the
+ // second word is set. If the page is in the large object space, the
+ // second word *may* (if the page start and large object chunk start are
+ // the same) contain the large object chunk size. In either case, the
+ // low-order bit for large object pages will be cleared.
+ // For normal pages this word is used to store the page flags and the
+ // offset of the allocation top.
+ intptr_t flags_;
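Because pages are kPageSize-aligned, the low kPageSizeBits bits of a page address are zero, which is what lets opaque_header hold a next-page address and a chunk number in [0..kPageSize[ at the same time. A hypothetical encode/decode pair consistent with the description above (the function names are invented for the example):

    #include <cassert>
    #include <cstdint>

    const int kPageSizeBits = 13;  // 8K pages
    const intptr_t kPageAlignmentMask = (intptr_t(1) << kPageSizeBits) - 1;

    // Pack a page-aligned next-page address together with a chunk id < 8K.
    intptr_t EncodeOpaqueHeader(intptr_t next_page_address, int chunk_id) {
      assert((next_page_address & kPageAlignmentMask) == 0);
      assert(chunk_id >= 0 && chunk_id <= kPageAlignmentMask);
      return next_page_address | chunk_id;
    }

    intptr_t NextPageAddress(intptr_t opaque_header) {
      return opaque_header & ~kPageAlignmentMask;
    }

    int ChunkId(intptr_t opaque_header) {
      return static_cast<int>(opaque_header & kPageAlignmentMask);
    }

    int main() {
      intptr_t next = intptr_t(42) << kPageSizeBits;  // a page-aligned address
      intptr_t header = EncodeOpaqueHeader(next, 7);
      assert(NextPageAddress(header) == next);
      assert(ChunkId(header) == 7);
      return 0;
    }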
-class LargePage : public MemoryChunk {
- public:
- HeapObject* GetObject() {
- return HeapObject::FromAddress(body());
- }
+ // This field contains dirty marks for regions covering the page. Only dirty
+ // regions might contain intergenerational references.
+ // Only 32 dirty marks are supported, so for large object pages several
+ // regions might be mapped to a single dirty mark.
+ uint32_t dirty_regions_;
- inline LargePage* next_page() const {
- return static_cast<LargePage*>(next_chunk());
- }
+ // The index of the page in its owner space.
+ int mc_page_index;
- inline void set_next_page(LargePage* page) {
- set_next_chunk(page);
- }
- private:
- static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
+ // During mark-compact collections this field contains the forwarding address
+ // of the first live object in this page.
+ // During a scavenge collection this field is used to store the allocation
+ // watermark if it is altered during the scavenge.
+ Address mc_first_forwarded;
- friend class MemoryAllocator;
+ Heap* heap_;
};
-STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
@@ -772,14 +380,6 @@ class Space : public Malloced {
// (e.g. see LargeObjectSpace).
virtual intptr_t SizeOfObjects() { return Size(); }
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (id_ == CODE_SPACE) {
- return RoundDown(size, kCodeAlignment);
- } else {
- return RoundDown(size, kPointerSize);
- }
- }
-
#ifdef DEBUG
virtual void Print() = 0;
#endif
@@ -830,9 +430,9 @@ class CodeRange {
// Allocates a chunk of memory from the large-object portion of
// the code range. On platforms with no separate code range, should
// not be called.
- MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
- size_t* allocated);
- void FreeRawMemory(Address buf, size_t length);
+ MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+ size_t* allocated);
+ void FreeRawMemory(void* buf, size_t length);
private:
Isolate* isolate_;
@@ -843,15 +443,9 @@ class CodeRange {
class FreeBlock {
public:
FreeBlock(Address start_arg, size_t size_arg)
- : start(start_arg), size(size_arg) {
- ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
- ASSERT(size >= static_cast<size_t>(Page::kPageSize));
- }
+ : start(start_arg), size(size_arg) {}
FreeBlock(void* start_arg, size_t size_arg)
- : start(static_cast<Address>(start_arg)), size(size_arg) {
- ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
- ASSERT(size >= static_cast<size_t>(Page::kPageSize));
- }
+ : start(static_cast<Address>(start_arg)), size(size_arg) {}
Address start;
size_t size;
@@ -879,63 +473,30 @@ class CodeRange {
};
-class SkipList {
- public:
- SkipList() {
- Clear();
- }
-
- void Clear() {
- for (int idx = 0; idx < kSize; idx++) {
- starts_[idx] = reinterpret_cast<Address>(-1);
- }
- }
-
- Address StartFor(Address addr) {
- return starts_[RegionNumber(addr)];
- }
-
- void AddObject(Address addr, int size) {
- int start_region = RegionNumber(addr);
- int end_region = RegionNumber(addr + size - kPointerSize);
- for (int idx = start_region; idx <= end_region; idx++) {
- if (starts_[idx] > addr) starts_[idx] = addr;
- }
- }
-
- static inline int RegionNumber(Address addr) {
- return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
- }
-
- static void Update(Address addr, int size) {
- Page* page = Page::FromAddress(addr);
- SkipList* list = page->skip_list();
- if (list == NULL) {
- list = new SkipList();
- page->set_skip_list(list);
- }
-
- list->AddObject(addr, size);
- }
-
- private:
- static const int kRegionSizeLog2 = 13;
- static const int kRegionSize = 1 << kRegionSizeLog2;
- static const int kSize = Page::kPageSize / kRegionSize;
-
- STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
-
- Address starts_[kSize];
-};
-
-
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
-// allocator allocated and deallocates pages for the paged heap spaces and large
-// pages for large object space.
+// allocator manages chunks for the paged heap spaces (old space and map
+// space). A paged chunk consists of pages. Pages in a chunk have contiguous
+// addresses and are linked as a list.
+//
+// The allocator keeps an initial chunk which is used for the new space. The
+// leftover regions of the initial chunk are used for the initial chunks of
+// old space and map space if they are big enough to hold at least one page.
+// The allocator assumes that there is one old space and one map space; each
+// expands by allocating kPagesPerChunk pages at a time, except possibly for
+// the last expansion before running out of space. The first chunk may also
+// contain fewer than kPagesPerChunk pages.
+//
+// The memory allocator also allocates chunks for the large object space, but
+// they are managed by the space itself. The new space does not expand.
//
-// Each space has to manage it's own pages.
+// The fact that pages for paged spaces are allocated and deallocated in chunks
+// induces a constraint on the order of pages in a linked list. We say that
+// pages are linked in chunk-order if and only if every two consecutive
+// pages from the same chunk are consecutive in the linked list.
//
+
+
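One consequence of the chunk-order definition above is that, in a chunk-ordered list, all pages of a given chunk form a single contiguous run. A toy check of that property over a flattened list of chunk ids (purely illustrative; the real list is walked via Page::next_page()):

    #include <cstdio>
    #include <set>
    #include <vector>

    // chunk_ids[i] is the chunk that the i-th page of the list belongs to.
    bool IsChunkOrdered(const std::vector<int>& chunk_ids) {
      std::set<int> finished;  // chunks whose run has already ended
      for (size_t i = 0; i < chunk_ids.size(); ++i) {
        if (finished.count(chunk_ids[i]) != 0) return false;  // chunk reappeared
        bool run_ends = (i + 1 == chunk_ids.size()) ||
                        (chunk_ids[i + 1] != chunk_ids[i]);
        if (run_ends) finished.insert(chunk_ids[i]);
      }
      return true;
    }

    int main() {
      std::vector<int> ordered = {0, 0, 0, 1, 1, 2};  // chunk-ordered
      std::vector<int> broken = {0, 0, 1, 0, 1, 2};   // chunk 0 reappears
      std::printf("%d %d\n", IsChunkOrdered(ordered), IsChunkOrdered(broken));
      return 0;
    }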
class MemoryAllocator {
public:
explicit MemoryAllocator(Isolate* isolate);
@@ -944,15 +505,91 @@ class MemoryAllocator {
// Max capacity of the total space and executable memory limit.
bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
+ // Deletes valid chunks.
void TearDown();
- Page* AllocatePage(PagedSpace* owner, Executability executable);
+ // Reserves an initial address range of virtual memory to be split between
+ // the two new space semispaces, the old space, and the map space. The
+ // memory is not yet committed or assigned to spaces and split into pages.
+ // The initial chunk is unmapped when the memory allocator is torn down.
+ // This function should only be called when there is not already a reserved
+ // initial chunk (initial_chunk_ should be NULL). It returns the start
+ // address of the initial chunk if successful, with the side effect of
+ // setting the initial chunk, or else NULL if unsuccessful, leaving the
+ // initial chunk NULL.
+ void* ReserveInitialChunk(const size_t requested);
+
+ // Commits pages from an as-yet-unmanaged block of virtual memory into a
+ // paged space. The block should be part of the initial chunk reserved via
+ // a call to ReserveInitialChunk. The number of pages is always returned in
+ // the output parameter num_pages. This function assumes that the start
+ // address is non-null and that it is big enough to hold at least one
+ // page-aligned page. The call always succeeds, and num_pages is always
+ // greater than zero.
+ Page* CommitPages(Address start, size_t size, PagedSpace* owner,
+ int* num_pages);
+
+ // Commit a contiguous block of memory from the initial chunk. Assumes that
+ // the address is not NULL, the size is greater than zero, and that the
+ // block is contained in the initial chunk. Returns true if it succeeded
+ // and false otherwise.
+ bool CommitBlock(Address start, size_t size, Executability executable);
- LargePage* AllocateLargePage(intptr_t object_size,
- Executability executable,
- Space* owner);
+ // Uncommit a contiguous block of memory [start..(start+size)[.
+ // start is not NULL, the size is greater than zero, and the
+ // block is contained in the initial chunk. Returns true if it succeeded
+ // and false otherwise.
+ bool UncommitBlock(Address start, size_t size);
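ReserveInitialChunk versus CommitBlock/UncommitBlock is the usual split between reserving address space and committing memory inside it. A Linux-flavoured sketch of that distinction using raw mmap/mprotect (in this codebase the equivalent work goes through the OS and VirtualMemory abstractions, not calls like these):

    #include <cassert>
    #include <cstddef>
    #include <sys/mman.h>

    // Reserve address space only: the pages are inaccessible until committed.
    void* Reserve(size_t size) {
      void* base = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return (base == MAP_FAILED) ? NULL : base;
    }

    // Commit a block inside the reservation by making it readable/writable.
    bool Commit(void* start, size_t size) {
      return mprotect(start, size, PROT_READ | PROT_WRITE) == 0;
    }

    // Uncommit by dropping access again (a real allocator would also let the
    // kernel discard the backing pages).
    bool Uncommit(void* start, size_t size) {
      return mprotect(start, size, PROT_NONE) == 0;
    }

    int main() {
      const size_t kChunkSize = 16 * 4096;
      void* chunk = Reserve(kChunkSize);
      assert(chunk != NULL);
      assert(Commit(chunk, 4096));
      static_cast<char*>(chunk)[0] = 1;   // now legal to touch the first page
      assert(Uncommit(chunk, 4096));
      munmap(chunk, kChunkSize);
      return 0;
    }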
- void Free(MemoryChunk* chunk);
+ // Zaps a contiguous block of memory [start..(start+size)[ thus
+ // filling it up with a recognizable non-NULL bit pattern.
+ void ZapBlock(Address start, size_t size);
+
+ // Attempts to allocate the requested (non-zero) number of pages from the
+ // OS. Fewer pages might be allocated than requested. If it fails to
+ // allocate memory from the OS or cannot allocate a single page, this
+ // function returns an invalid page pointer (NULL). The caller must check
+ // whether the returned page is valid (by calling Page::is_valid()). It is
+ // guaranteed that allocated pages have contiguous addresses. The actual
+ // number of allocated pages is returned in the output parameter
+ // allocated_pages. If the PagedSpace owner is executable and there is
+ // a code range, the pages are allocated from the code range.
+ Page* AllocatePages(int requested_pages, int* allocated_pages,
+ PagedSpace* owner);
+
+ // Frees pages from a given page and after. Requires pages to be
+ // linked in chunk-order (see comment for class).
+ // If 'p' is the first page of a chunk, pages from 'p' are freed
+ // and this function returns an invalid page pointer.
+ // Otherwise, the function searches a page after 'p' that is
+ // the first page of a chunk. Pages after the found page
+ // are freed and the function returns 'p'.
+ Page* FreePages(Page* p);
+
+ // Frees all pages owned by given space.
+ void FreeAllPages(PagedSpace* space);
+
+ // Allocates and frees raw memory of a certain size.
+ // These are just thin wrappers around OS::Allocate and OS::Free,
+ // but they keep track of allocated bytes as part of the heap.
+ // If the flag is EXECUTABLE and a code range exists, the requested
+ // memory is allocated from the code range. If a code range exists
+ // and the freed memory is in it, the code range manages the freed memory.
+ MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+ size_t* allocated,
+ Executability executable);
+ void FreeRawMemory(void* buf,
+ size_t length,
+ Executability executable);
+ void PerformAllocationCallback(ObjectSpace space,
+ AllocationAction action,
+ size_t size);
+
+ void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action);
+ void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+ bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
// Returns the maximum available bytes of heaps.
intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
@@ -974,68 +611,67 @@ class MemoryAllocator {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
}
-#ifdef DEBUG
- // Reports statistic info of the space.
- void ReportStatistics();
-#endif
+ // Links two pages.
+ inline void SetNextPage(Page* prev, Page* next);
- MemoryChunk* AllocateChunk(intptr_t body_size,
- Executability executable,
- Space* space);
+ // Returns the next page of a given page.
+ inline Page* GetNextPage(Page* p);
- Address ReserveAlignedMemory(size_t requested,
- size_t alignment,
- VirtualMemory* controller);
- Address AllocateAlignedMemory(size_t requested,
- size_t alignment,
- Executability executable,
- VirtualMemory* controller);
+ // Checks whether a page belongs to a space.
+ inline bool IsPageInSpace(Page* p, PagedSpace* space);
- void FreeMemory(VirtualMemory* reservation, Executability executable);
- void FreeMemory(Address addr, size_t size, Executability executable);
+ // Returns the space that owns the given page.
+ inline PagedSpace* PageOwner(Page* page);
- // Commit a contiguous block of memory from the initial chunk. Assumes that
- // the address is not NULL, the size is greater than zero, and that the
- // block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool CommitBlock(Address start, size_t size, Executability executable);
+ // Finds the first/last page in the same chunk as a given page.
+ Page* FindFirstPageInSameChunk(Page* p);
+ Page* FindLastPageInSameChunk(Page* p);
- // Uncommit a contiguous block of memory [start..(start+size)[.
- // start is not NULL, the size is greater than zero, and the
- // block is contained in the initial chunk. Returns true if it succeeded
- // and false otherwise.
- bool UncommitBlock(Address start, size_t size);
-
- // Zaps a contiguous block of memory [start..(start+size)[ thus
- // filling it up with a recognizable non-NULL bit pattern.
- void ZapBlock(Address start, size_t size);
-
- void PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size);
+ // Relinks list of pages owned by space to make it chunk-ordered.
+ // Returns new first and last pages of space.
+ // Also returns the last page in the relinked list which has the
+ // WasInUseBeforeMC flag set.
+ void RelinkPageListInChunkOrder(PagedSpace* space,
+ Page** first_page,
+ Page** last_page,
+ Page** last_page_in_use);
- void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action);
-
- void RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback);
+#ifdef DEBUG
+ // Reports statistic info of the space.
+ void ReportStatistics();
+#endif
- bool MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback);
+ // Due to an encoding limitation, we can have at most 8K chunks.
+ static const int kMaxNofChunks = 1 << kPageSizeBits;
+ // If a chunk has at least 16 pages, the maximum heap size is about
+ // 8K * 8K * 16 = 1G bytes.
+#ifdef V8_TARGET_ARCH_X64
+ static const int kPagesPerChunk = 32;
+ // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
+ static const int kChunkTableLevels = 4;
+ static const int kChunkTableBitsPerLevel = 12;
+#else
+ static const int kPagesPerChunk = 16;
+ // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
+ static const int kChunkTableLevels = 2;
+ static const int kChunkTableBitsPerLevel = 8;
+#endif
private:
+ static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
+
Isolate* isolate_;
// Maximum space size in bytes.
- size_t capacity_;
+ intptr_t capacity_;
// Maximum subset of capacity_ that can be executable
- size_t capacity_executable_;
+ intptr_t capacity_executable_;
// Allocated space size in bytes.
- size_t size_;
+ intptr_t size_;
+
// Allocated executable space size in bytes.
- size_t size_executable_;
+ intptr_t size_executable_;
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -1047,11 +683,64 @@ class MemoryAllocator {
ObjectSpace space;
AllocationAction action;
};
-
// A list of callbacks that are triggered when memory is allocated or freed
List<MemoryAllocationCallbackRegistration>
memory_allocation_callbacks_;
+ // The initial chunk of virtual memory.
+ VirtualMemory* initial_chunk_;
+
+ // Allocated chunk info: chunk start address, chunk size, and owning space.
+ class ChunkInfo BASE_EMBEDDED {
+ public:
+ ChunkInfo() : address_(NULL),
+ size_(0),
+ owner_(NULL),
+ executable_(NOT_EXECUTABLE),
+ owner_identity_(FIRST_SPACE) {}
+ inline void init(Address a, size_t s, PagedSpace* o);
+ Address address() { return address_; }
+ size_t size() { return size_; }
+ PagedSpace* owner() { return owner_; }
+ // We save executability of the owner to allow using it
+ // when collecting stats after the owner has been destroyed.
+ Executability executable() const { return executable_; }
+ AllocationSpace owner_identity() const { return owner_identity_; }
+
+ private:
+ Address address_;
+ size_t size_;
+ PagedSpace* owner_;
+ Executability executable_;
+ AllocationSpace owner_identity_;
+ };
+
+ // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
+ List<ChunkInfo> chunks_;
+ List<int> free_chunk_ids_;
+ int max_nof_chunks_;
+ int top_;
+
+ // Push/pop a free chunk id onto/from the stack.
+ void Push(int free_chunk_id);
+ int Pop();
+ bool OutOfChunkIds() { return top_ == 0; }
+
+ // Frees a chunk.
+ void DeleteChunk(int chunk_id);
+
+ // Basic check whether a chunk id is in the valid range.
+ inline bool IsValidChunkId(int chunk_id);
+
+ // Checks whether a chunk id identifies an allocated chunk.
+ inline bool IsValidChunk(int chunk_id);
+
+ // Returns the chunk id that a page belongs to.
+ inline int GetChunkId(Page* p);
+
+ // True if the address lies in the initial chunk.
+ inline bool InInitialChunk(Address address);
+
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
// collector to rebuild page headers in the from space, which is
@@ -1059,7 +748,13 @@ class MemoryAllocator {
Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner);
- DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
+ Page* RelinkPagesInChunk(int chunk_id,
+ Address chunk_start,
+ size_t chunk_size,
+ Page* prev,
+ Page** last_page_in_use);
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
};
@@ -1082,58 +777,71 @@ class ObjectIterator : public Malloced {
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
-// A HeapObjectIterator iterates objects from the bottom of the given space
-// to its top or from the bottom of the given page to its top.
+// A HeapObjectIterator iterates objects from a given address to the
+// top of a space. The given address must be below the current
+// allocation pointer (space top). There are some caveats.
+//
+// (1) If the space top changes upward during iteration (because of
+// allocating new objects), the iterator does not iterate objects
+// above the original space top. The caller must create a new
+// iterator starting from the old top in order to visit these new
+// objects.
+//
+// (2) If new objects are allocated below the original allocation top
+// (e.g., free-list allocation in paged spaces), the new objects
+// may or may not be iterated depending on their position with
+// respect to the current point of iteration.
//
-// If objects are allocated in the page during iteration the iterator may
-// or may not iterate over those objects. The caller must create a new
-// iterator in order to be sure to visit these new objects.
+// (3) The space top should not change downward during iteration,
+// otherwise the iterator will return not-necessarily-valid
+// objects.
+
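The iteration scheme described above asks each object for its size and bumps the cursor, which is exactly why the caveats about the allocation top matter. A self-contained toy version over a bump-allocated buffer (a 4-byte size header stands in for the real object layout and size function):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Toy "heap object": a 4-byte size header followed by payload.
    static uint8_t buffer[256];
    static size_t top = 0;  // allocation top: everything below it is objects

    size_t Allocate(uint32_t payload_bytes) {
      size_t addr = top;
      uint32_t size = 4 + payload_bytes;
      std::memcpy(buffer + addr, &size, 4);
      top += size;
      return addr;
    }

    // Iterate objects from the bottom up to a snapshot of the allocation top.
    void Iterate(size_t limit) {
      size_t cur = 0;
      while (cur < limit) {
        uint32_t size;
        std::memcpy(&size, buffer + cur, 4);  // the object knows its own size
        std::printf("object at %zu, size %u\n", cur, size);
        cur += size;                          // bump to the next object
      }
    }

    int main() {
      Allocate(12);
      Allocate(28);
      size_t snapshot = top;  // iterate only objects that existed at this point
      Allocate(8);            // allocated above the snapshot: not visited
      Iterate(snapshot);
      return 0;
    }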
class HeapObjectIterator: public ObjectIterator {
public:
- // Creates a new object iterator in a given space.
+ // Creates a new object iterator in a given space. If a start
+ // address is not given, the iterator starts from the space bottom.
// If the size function is not given, the iterator calls the default
// Object::Size().
explicit HeapObjectIterator(PagedSpace* space);
HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
+ HeapObjectIterator(PagedSpace* space, Address start);
+ HeapObjectIterator(PagedSpace* space,
+ Address start,
+ HeapObjectCallback size_func);
HeapObjectIterator(Page* page, HeapObjectCallback size_func);
- // Advance to the next object, skipping free spaces and other fillers and
- // skipping the special garbage section of which there is one per space.
- // Returns NULL when the iteration has ended.
- inline HeapObject* Next() {
- do {
- HeapObject* next_obj = FromCurrentPage();
- if (next_obj != NULL) return next_obj;
- } while (AdvanceToNextPage());
- return NULL;
+ inline HeapObject* next() {
+ return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
}
- virtual HeapObject* next_object() {
- return Next();
- }
+ // implementation of ObjectIterator.
+ virtual HeapObject* next_object() { return next(); }
private:
- enum PageMode { kOnePageOnly, kAllPagesInSpace };
+ Address cur_addr_; // current iteration point
+ Address end_addr_; // end iteration point
+ Address cur_limit_; // current page limit
+ HeapObjectCallback size_func_; // size function
+ Page* end_page_; // caches the page of the end address
- Address cur_addr_; // Current iteration point.
- Address cur_end_; // End iteration point.
- HeapObjectCallback size_func_; // Size function or NULL.
- PagedSpace* space_;
- PageMode page_mode_;
+ HeapObject* FromCurrentPage() {
+ ASSERT(cur_addr_ < cur_limit_);
+
+ HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+ int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+ ASSERT_OBJECT_SIZE(obj_size);
+
+ cur_addr_ += obj_size;
+ ASSERT(cur_addr_ <= cur_limit_);
- // Fast (inlined) path of next().
- inline HeapObject* FromCurrentPage();
+ return obj;
+ }
- // Slow path of next(), goes into the next page. Returns false if the
- // iteration has ended.
- bool AdvanceToNextPage();
+ // Slow path of next, goes into the next page.
+ HeapObject* FromNextPage();
// Initializes fields.
- inline void Initialize(PagedSpace* owner,
- Address start,
- Address end,
- PageMode mode,
- HeapObjectCallback size_func);
+ void Initialize(Address start, Address end, HeapObjectCallback size_func);
#ifdef DEBUG
// Verifies whether fields have valid values.
@@ -1144,10 +852,36 @@ class HeapObjectIterator: public ObjectIterator {
// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
+//
+// The PageIterator class provides three modes for iterating pages in a space:
+// PAGES_IN_USE iterates pages containing allocated objects.
+// PAGES_USED_BY_MC iterates pages that hold relocated objects during a
+// mark-compact collection.
+// ALL_PAGES iterates all pages in the space.
+//
+// There are some caveats.
+//
+// (1) If the space expands during iteration, new pages will not be
+// returned by the iterator in any mode.
+//
+// (2) If new objects are allocated during iteration, they will appear
+// in pages returned by the iterator. Allocation may cause the
+// allocation pointer or MC allocation pointer in the last page to
+// change between constructing the iterator and iterating the last
+// page.
+//
+// (3) The space should not shrink during iteration, otherwise the
+// iterator will return deallocated pages.
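A toy rendering of the three modes: the iterator fixes its stop page at construction time, which is why pages added later are never returned. The vector-based space below is purely illustrative (the real iterator walks the page list and caches stop_page_):

    #include <cstdio>
    #include <vector>

    struct ToySpace {
      std::vector<int> pages;  // all pages, in list order
      int last_in_use;         // index of the last page holding allocated objects
      int last_used_by_mc;     // index of the last page holding relocated objects
    };

    enum Mode { PAGES_IN_USE, PAGES_USED_BY_MC, ALL_PAGES };

    void IteratePages(const ToySpace& space, Mode mode) {
      int stop;  // index of the last page to return
      switch (mode) {
        case PAGES_IN_USE:     stop = space.last_in_use;     break;
        case PAGES_USED_BY_MC: stop = space.last_used_by_mc; break;
        default:               stop = static_cast<int>(space.pages.size()) - 1;
      }
      for (int i = 0; i <= stop; i++) {
        std::printf("page %d\n", space.pages[i]);
      }
    }

    int main() {
      ToySpace space = {{10, 11, 12, 13, 14}, /*last_in_use=*/2, /*last_used_by_mc=*/1};
      IteratePages(space, PAGES_IN_USE);      // pages 10, 11, 12
      IteratePages(space, PAGES_USED_BY_MC);  // pages 10, 11
      IteratePages(space, ALL_PAGES);         // all five pages
      return 0;
    }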
class PageIterator BASE_EMBEDDED {
public:
- explicit inline PageIterator(PagedSpace* space);
+ enum Mode {
+ PAGES_IN_USE,
+ PAGES_USED_BY_MC,
+ ALL_PAGES
+ };
+
+ PageIterator(PagedSpace* space, Mode mode);
inline bool has_next();
inline Page* next();
@@ -1155,25 +889,21 @@ class PageIterator BASE_EMBEDDED {
private:
PagedSpace* space_;
Page* prev_page_; // Previous page returned.
- // Next page that will be returned. Cached here so that we can use this
- // iterator for operations that deallocate pages.
- Page* next_page_;
+ Page* stop_page_; // Page to stop at (last page returned by the iterator).
};
// -----------------------------------------------------------------------------
-// A space has a circular list of pages. The next page can be accessed via
-// Page::next_page() call.
+// A space has a list of pages. The next page can be accessed via
+// Page::next_page() call. The next page of the last page is an
+// invalid page pointer. A space can expand and shrink dynamically.
// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
public:
- AllocationInfo() : top(NULL), limit(NULL) {
- }
-
- Address top; // Current allocation top.
- Address limit; // Current allocation limit.
+ Address top; // current allocation top
+ Address limit; // current allocation limit
#ifdef DEBUG
bool VerifyPagedAllocation() {
@@ -1205,199 +935,70 @@ class AllocationStats BASE_EMBEDDED {
// Zero out all the allocation statistics (ie, no capacity).
void Clear() {
capacity_ = 0;
+ available_ = 0;
size_ = 0;
waste_ = 0;
}
- void ClearSizeWaste() {
- size_ = capacity_;
- waste_ = 0;
- }
-
// Reset the allocation statistics (ie, available = capacity with no
// wasted or allocated bytes).
void Reset() {
+ available_ = capacity_;
size_ = 0;
waste_ = 0;
}
// Accessors for the allocation statistics.
intptr_t Capacity() { return capacity_; }
+ intptr_t Available() { return available_; }
intptr_t Size() { return size_; }
intptr_t Waste() { return waste_; }
- // Grow the space by adding available bytes. They are initially marked as
- // being in use (part of the size), but will normally be immediately freed,
- // putting them on the free list and removing them from size_.
+ // Grow the space by adding available bytes.
void ExpandSpace(int size_in_bytes) {
capacity_ += size_in_bytes;
- size_ += size_in_bytes;
- ASSERT(size_ >= 0);
+ available_ += size_in_bytes;
}
- // Shrink the space by removing available bytes. Since shrinking is done
- // during sweeping, bytes have been marked as being in use (part of the size)
- // and are hereby freed.
+ // Shrink the space by removing available bytes.
void ShrinkSpace(int size_in_bytes) {
capacity_ -= size_in_bytes;
- size_ -= size_in_bytes;
- ASSERT(size_ >= 0);
+ available_ -= size_in_bytes;
}
// Allocate from available bytes (available -> size).
void AllocateBytes(intptr_t size_in_bytes) {
+ available_ -= size_in_bytes;
size_ += size_in_bytes;
- ASSERT(size_ >= 0);
}
// Free allocated bytes, making them available (size -> available).
void DeallocateBytes(intptr_t size_in_bytes) {
size_ -= size_in_bytes;
- ASSERT(size_ >= 0);
+ available_ += size_in_bytes;
}
// Waste free bytes (available -> waste).
void WasteBytes(int size_in_bytes) {
- size_ -= size_in_bytes;
+ available_ -= size_in_bytes;
waste_ += size_in_bytes;
- ASSERT(size_ >= 0);
+ }
+
+ // Consider the wasted bytes to be allocated, as they contain filler
+ // objects (waste -> size).
+ void FillWastedBytes(intptr_t size_in_bytes) {
+ waste_ -= size_in_bytes;
+ size_ += size_in_bytes;
}
private:
intptr_t capacity_;
+ intptr_t available_;
intptr_t size_;
intptr_t waste_;
};
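With the available_ counter restored, the four counters satisfy capacity == available + size + waste at every step (compare the "Size() + Available() + Waste()" note on Capacity() further down); each operation just moves bytes between buckets. A standalone mock with the same transitions (the names mirror the class above, but this is a mock, not the V8 type):

    #include <cassert>
    #include <cstdint>

    struct MockStats {
      intptr_t capacity = 0, available = 0, size = 0, waste = 0;

      void ExpandSpace(int n)     { capacity += n; available += n; }
      void AllocateBytes(int n)   { available -= n; size += n; }   // available -> size
      void DeallocateBytes(int n) { size -= n; available += n; }   // size -> available
      void WasteBytes(int n)      { available -= n; waste += n; }  // available -> waste
      void FillWastedBytes(int n) { waste -= n; size += n; }       // waste -> size

      bool Consistent() const { return capacity == available + size + waste; }
    };

    int main() {
      MockStats s;
      s.ExpandSpace(8192);    assert(s.Consistent());
      s.AllocateBytes(100);   assert(s.Consistent());
      s.WasteBytes(12);       assert(s.Consistent());
      s.FillWastedBytes(12);  assert(s.Consistent());
      s.DeallocateBytes(50);  assert(s.Consistent());
      assert(s.size == 62 && s.waste == 0 && s.available == 8130);
      return 0;
    }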
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap. They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object). They have a size and a next pointer. The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
- public:
- // Obtain a free-list node from a raw address. This is not a cast because
- // it does not check nor require that the first word at the address is a map
- // pointer.
- static FreeListNode* FromAddress(Address address) {
- return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
- }
-
- static inline bool IsFreeListNode(HeapObject* object);
-
- // Set the size in bytes, which can be read with HeapObject::Size(). This
- // function also writes a map to the first word of the block so that it
- // looks like a heap object to the garbage collector and heap iteration
- // functions.
- void set_size(Heap* heap, int size_in_bytes);
-
- // Accessors for the next field.
- inline FreeListNode* next();
- inline FreeListNode** next_address();
- inline void set_next(FreeListNode* next);
-
- inline void Zap();
-
- private:
- static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list for the old space. The free list is organized in such a way
-// as to encourage objects allocated around the same time to be near each
-// other. The normal way to allocate is intended to be by bumping a 'top'
-// pointer until it hits a 'limit' pointer. When the limit is hit we need to
-// find a new space to allocate from. This is done with the free list, which
-// is divided up into rough categories to cut down on waste. Having finer
-// categories would scatter allocation more.
-
-// The old space free list is organized in categories.
-// 1-31 words: Such small free areas are discarded for efficiency reasons.
-// They can be reclaimed by the compactor. However the distance between top
-// and limit may be this small.
-// 32-255 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 1-31 words in size. These
-// spaces are called small.
-// 256-2047 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 32-255 words in size. These
-// spaces are called medium.
-// 1048-16383 words: There is a list of spaces this large. It is used for top
-// and limit when the object we need to allocate is 256-2047 words in size.
-// These spaces are call large.
-// At least 16384 words. This list is for objects of 2048 words or larger.
-// Empty pages are added to this list. These spaces are called huge.
-class FreeList BASE_EMBEDDED {
- public:
- explicit FreeList(PagedSpace* owner);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- intptr_t available() { return available_; }
-
- // Place a node on the free list. The block of size 'size_in_bytes'
- // starting at 'start' is placed on the free list. The return value is the
- // number of bytes that have been lost due to internal fragmentation by
- // freeing the block. Bookkeeping information will be written to the block,
- // ie, its contents will be destroyed. The start address should be word
- // aligned, and the size should be a non-zero multiple of the word size.
- int Free(Address start, int size_in_bytes);
-
- // Allocate a block of size 'size_in_bytes' from the free list. The block
- // is unitialized. A failure is returned if no block is available. The
- // number of bytes lost to fragmentation is returned in the output parameter
- // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
- MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
-
- void MarkNodes();
-
-#ifdef DEBUG
- void Zap();
- static intptr_t SumFreeList(FreeListNode* node);
- static int FreeListLength(FreeListNode* cur);
- intptr_t SumFreeLists();
- bool IsVeryLong();
-#endif
-
- void CountFreeListItems(Page* p, intptr_t* sizes);
-
- private:
- // The size range of blocks, in bytes.
- static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
-
- FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
-
- FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
-
- PagedSpace* owner_;
- Heap* heap_;
-
- // Total available bytes in all blocks on this free list.
- int available_;
-
- static const int kSmallListMin = 0x20 * kPointerSize;
- static const int kSmallListMax = 0xff * kPointerSize;
- static const int kMediumListMax = 0x7ff * kPointerSize;
- static const int kLargeListMax = 0x3fff * kPointerSize;
- static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
- static const int kMediumAllocationMax = kSmallListMax;
- static const int kLargeAllocationMax = kMediumListMax;
- FreeListNode* small_list_;
- FreeListNode* medium_list_;
- FreeListNode* large_list_;
- FreeListNode* huge_list_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
-};
-
-
class PagedSpace : public Space {
public:
// Creates a space with a maximum capacity, and an id.
@@ -1412,7 +1013,7 @@ class PagedSpace : public Space {
// the memory allocator's initial chunk) if possible. If the block of
// addresses is not big enough to contain a single page-aligned page, a
// fresh chunk will be allocated.
- bool Setup();
+ bool Setup(Address start, size_t size);
// Returns true if the space has been successfully set up and not
// subsequently torn down.
@@ -1425,6 +1026,8 @@ class PagedSpace : public Space {
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
bool Contains(HeapObject* o) { return Contains(o->address()); }
+ // Never crashes even if a is not a valid pointer.
+ inline bool SafeContains(Address a);
// Given an address occupied by a live object, return that object if it is
// in this space, or Failure::Exception() if it is not. The implementation
@@ -1432,91 +1035,104 @@ class PagedSpace : public Space {
// linear in the number of objects in the page. It may be slow.
MUST_USE_RESULT MaybeObject* FindObject(Address addr);
+ // Checks whether page is currently in use by this space.
+ bool IsUsed(Page* page);
+
+ void MarkAllPagesClean();
+
// Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact();
+ virtual void PrepareForMarkCompact(bool will_compact);
- // Current capacity without growing (Size() + Available()).
+ // The top of allocation in a page in this space. Undefined if page is unused.
+ Address PageAllocationTop(Page* page) {
+ return page == TopPageOf(allocation_info_) ? top()
+ : PageAllocationLimit(page);
+ }
+
+ // The limit of allocation for a page in this space.
+ virtual Address PageAllocationLimit(Page* page) = 0;
+
+ void FlushTopPageWatermark() {
+ AllocationTopPage()->SetCachedAllocationWatermark(top());
+ AllocationTopPage()->InvalidateWatermark(true);
+ }
+
+ // Current capacity without growing (Size() + Available() + Waste()).
intptr_t Capacity() { return accounting_stats_.Capacity(); }
// Total amount of memory committed for this space. For paged
// spaces this equals the capacity.
intptr_t CommittedMemory() { return Capacity(); }
- // Sets the capacity, the available space and the wasted space to zero.
- // The stats are rebuilt during sweeping by adding each page to the
- // capacity and the size when it is encountered. As free spaces are
- // discovered during the sweeping they are subtracted from the size and added
- // to the available and wasted totals.
- void ClearStats() {
- accounting_stats_.ClearSizeWaste();
- }
-
- // Available bytes without growing. These are the bytes on the free list.
- // The bytes in the linear allocation area are not included in this total
- // because updating the stats would slow down allocation. New pages are
- // immediately added to the free list so they show up here.
- intptr_t Available() { return free_list_.available(); }
+ // Available bytes without growing.
+ intptr_t Available() { return accounting_stats_.Available(); }
- // Allocated bytes in this space. Garbage bytes that were not found due to
- // lazy sweeping are counted as being allocated! The bytes in the current
- // linear allocation area (between top and limit) are also counted here.
+ // Allocated bytes in this space.
virtual intptr_t Size() { return accounting_stats_.Size(); }
- // As size, but the bytes in the current linear allocation area are not
- // included.
- virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); }
+ // Wasted bytes due to fragmentation and not recoverable until the
+ // next GC of this space.
+ intptr_t Waste() { return accounting_stats_.Waste(); }
- // Wasted bytes in this space. These are just the bytes that were thrown away
- // due to being too small to use for allocation. They do not include the
- // free bytes that were not found at all due to lazy sweeping.
- virtual intptr_t Waste() { return accounting_stats_.Waste(); }
+ // Returns the address of the first object in this space.
+ Address bottom() { return first_page_->ObjectAreaStart(); }
// Returns the allocation pointer in this space.
- Address top() {
- return allocation_info_.top;
- }
- Address limit() { return allocation_info_.limit; }
+ Address top() { return allocation_info_.top; }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
+ // Allocate the requested number of bytes for relocation during mark-compact
+ // collection.
+ MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
+
virtual bool ReserveSpace(int bytes);
- // Give a block of memory to the space's free list. It might be added to
- // the free list or accounted as waste.
- // If add_to_freelist is false then just accounting stats are updated and
- // no attempt to add area to free list is made.
- int Free(Address start, int size_in_bytes) {
- int wasted = free_list_.Free(start, size_in_bytes);
- accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
- return size_in_bytes - wasted;
- }
+ // Used by ReserveSpace.
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
+
+ // Free all pages in range from prev (exclusive) to last (inclusive).
+ // Freed pages are moved to the end of page list.
+ void FreePages(Page* prev, Page* last);
+
+ // Deallocates a block.
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) = 0;
// Set space allocation info.
- void SetTop(Address top, Address limit) {
- ASSERT(top == limit ||
- Page::FromAddress(top) == Page::FromAddress(limit - 1));
+ void SetTop(Address top) {
allocation_info_.top = top;
- allocation_info_.limit = limit;
+ allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
}
- void Allocate(int bytes) {
- accounting_stats_.AllocateBytes(bytes);
- }
+ // ---------------------------------------------------------------------------
+ // Mark-compact collection support functions
- void IncreaseCapacity(int size) {
- accounting_stats_.ExpandSpace(size);
+ // Set the relocation point to the beginning of the space.
+ void MCResetRelocationInfo();
+
+ // Writes relocation info to the top page.
+ void MCWriteRelocationInfoToPage() {
+ TopPageOf(mc_forwarding_info_)->
+ SetAllocationWatermark(mc_forwarding_info_.top);
}
- // Releases an unused page and shrinks the space.
- void ReleasePage(Page* page);
+ // Computes the offset of a given address in this space from the beginning
+ // of the space.
+ int MCSpaceOffsetForAddress(Address addr);
+
+ // Updates the allocation pointer to the relocation top after a mark-compact
+ // collection.
+ virtual void MCCommitRelocationInfo() = 0;
- // Releases all of the unused pages.
- void ReleaseAllUnusedPages();
+ // Releases half of unused pages.
+ void Shrink();
- // The dummy page that anchors the linked list of pages.
- Page* anchor() { return &anchor_; }
+ // Ensures that the capacity is at least 'capacity'. Returns false on failure.
+ bool EnsureCapacity(int capacity);
#ifdef DEBUG
// Print meta info and objects in this space.
@@ -1525,9 +1141,6 @@ class PagedSpace : public Space {
// Verify integrity of this space.
virtual void Verify(ObjectVisitor* visitor);
- // Reports statistics for the space
- void ReportStatistics();
-
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject* obj) {}
@@ -1538,67 +1151,10 @@ class PagedSpace : public Space {
static void ResetCodeStatistics();
#endif
- bool was_swept_conservatively() { return was_swept_conservatively_; }
- void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
-
- // Evacuation candidates are swept by evacuator. Needs to return a valid
- // result before _and_ after evacuation has finished.
- static bool ShouldBeSweptLazily(Page* p) {
- return !p->IsEvacuationCandidate() &&
- !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
- !p->WasSweptPrecisely();
- }
-
- void SetPagesToSweep(Page* first, Page* last) {
- first_unswept_page_ = first;
- last_unswept_page_ = last;
- }
-
- bool AdvanceSweeper(intptr_t bytes_to_sweep);
-
- bool IsSweepingComplete() {
- return !first_unswept_page_->is_valid();
- }
-
- Page* FirstPage() { return anchor_.next_page(); }
- Page* LastPage() { return anchor_.prev_page(); }
-
- bool IsFragmented(Page* p) {
- intptr_t sizes[4];
- free_list_.CountFreeListItems(p, sizes);
-
- intptr_t ratio;
- intptr_t ratio_threshold;
- if (identity() == CODE_SPACE) {
- ratio = (sizes[1] * 10 + sizes[2] * 2) * 100 / Page::kObjectAreaSize;
- ratio_threshold = 10;
- } else {
- ratio = (sizes[0] * 5 + sizes[1]) * 100 / Page::kObjectAreaSize;
- ratio_threshold = 15;
- }
-
- if (FLAG_trace_fragmentation) {
- PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
- reinterpret_cast<void*>(p),
- identity(),
- static_cast<int>(sizes[0]),
- static_cast<double>(sizes[0] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[1]),
- static_cast<double>(sizes[1] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[2]),
- static_cast<double>(sizes[2] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[3]),
- static_cast<double>(sizes[3] * 100) / Page::kObjectAreaSize,
- (ratio > ratio_threshold) ? "[fragmented]" : "");
- }
+ // Returns the page of the allocation pointer.
+ Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
- return (ratio > ratio_threshold) ||
- (FLAG_always_compact && sizes[3] != Page::kObjectAreaSize);
- }
-
- void EvictEvacuationCandidatesFromFreeLists();
-
- bool CanExpand();
+ void RelinkPageListInChunkOrder(bool deallocate_blocks);
protected:
// Maximum capacity of this space.
@@ -1607,42 +1163,80 @@ class PagedSpace : public Space {
// Accounting information for this space.
AllocationStats accounting_stats_;
- // The dummy page that anchors the double linked list of pages.
- Page anchor_;
+ // The first page in this space.
+ Page* first_page_;
- // The space's free list.
- FreeList free_list_;
+ // The last page in this space. Initially set in Setup, updated in
+ // Expand and Shrink.
+ Page* last_page_;
+
+ // True if pages owned by this space are linked in chunk-order.
+ // See comment for class MemoryAllocator for definition of chunk-order.
+ bool page_list_is_chunk_ordered_;
// Normal allocation information.
AllocationInfo allocation_info_;
+ // Relocation information during mark-compact collections.
+ AllocationInfo mc_forwarding_info_;
+
// Bytes of each page that cannot be allocated. Possibly non-zero
// for pages in spaces with only fixed-size objects. Always zero
// for pages in spaces with variable sized objects (those pages are
// padded with free-list nodes).
int page_extra_;
- bool was_swept_conservatively_;
+ // Sets allocation pointer to a page bottom.
+ static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
- Page* first_unswept_page_;
- Page* last_unswept_page_;
+ // Returns the top page specified by an allocation info structure.
+ static Page* TopPageOf(AllocationInfo alloc_info) {
+ return Page::FromAllocationTop(alloc_info.limit);
+ }
+
+ int CountPagesToTop() {
+ Page* p = Page::FromAllocationTop(allocation_info_.top);
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ int counter = 1;
+ while (it.has_next()) {
+ if (it.next() == p) return counter;
+ counter++;
+ }
+ UNREACHABLE();
+ return -1;
+ }
// Expands the space by allocating a fixed number of pages. Returns false if
- // it cannot allocate requested number of pages from OS.
- bool Expand();
+ // it cannot allocate the requested number of pages from the OS. Newly
+ // allocated pages are appended after last_page.
+ bool Expand(Page* last_page);
+
+ // Generic fast case allocation function that tries linear allocation in
+ // the top page of 'alloc_info'. Returns NULL on failure.
+ inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
+ int size_in_bytes);
- // Generic fast case allocation function that tries linear allocation at the
- // address denoted by top in allocation_info_.
- inline HeapObject* AllocateLinearly(int size_in_bytes);
+ // During normal allocation or deserialization, roll to the next page in
+ // the space (there is assumed to be one) and allocate there. This
+ // function is space-dependent.
+ virtual HeapObject* AllocateInNextPage(Page* current_page,
+ int size_in_bytes) = 0;
// Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
+
+ // Slow path of MCAllocateRaw.
+ MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
#ifdef DEBUG
// Returns the number of total pages in this space.
int CountTotalPages();
#endif
+ private:
+ // Returns a pointer to the page of the relocation pointer.
+ Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
+
friend class PageIterator;
};
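AllocateRaw's fast path (AllocateLinearly above) is plain bump-pointer allocation against the top/limit pair in AllocationInfo, falling back to a slow path when the current page is exhausted. A toy version of that split (toy types and raw pointers; the real code returns HeapObject* or a failure object):

    #include <cstdint>
    #include <cstdio>

    struct ToyAllocationInfo {
      uintptr_t top;    // current allocation top
      uintptr_t limit;  // current allocation limit (end of the current page)
    };

    // Fast path: bump the top pointer if the request fits in the current page.
    void* AllocateLinearly(ToyAllocationInfo* info, int size_in_bytes) {
      uintptr_t new_top = info->top + size_in_bytes;
      if (new_top > info->limit) return NULL;  // does not fit: caller goes slow
      void* result = reinterpret_cast<void*>(info->top);
      info->top = new_top;
      return result;
    }

    // Slow path stand-in: a real space would move to the next page (or expand)
    // and retry the linear allocation there.
    void* SlowAllocate(ToyAllocationInfo* info, int size_in_bytes) {
      std::printf("slow path for %d bytes\n", size_in_bytes);
      return NULL;
    }

    void* AllocateRaw(ToyAllocationInfo* info, int size_in_bytes) {
      void* result = AllocateLinearly(info, size_in_bytes);
      return (result != NULL) ? result : SlowAllocate(info, size_in_bytes);
    }

    int main() {
      static uint8_t page[4096];
      ToyAllocationInfo info = {reinterpret_cast<uintptr_t>(page),
                                reinterpret_cast<uintptr_t>(page) + sizeof(page)};
      AllocateRaw(&info, 64);    // fast path
      AllocateRaw(&info, 8192);  // too big for the page: slow path
      return 0;
    }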
@@ -1682,113 +1276,20 @@ class HistogramInfo: public NumberAndSizeInfo {
};
-enum SemiSpaceId {
- kFromSpace = 0,
- kToSpace = 1
-};
-
-
-class SemiSpace;
-
-
-class NewSpacePage : public MemoryChunk {
- public:
- // GC related flags copied from from-space to to-space when
- // flipping semispaces.
- static const intptr_t kCopyOnFlipFlagsMask =
- (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::SCAN_ON_SCAVENGE);
-
- inline NewSpacePage* next_page() const {
- return static_cast<NewSpacePage*>(next_chunk());
- }
-
- inline void set_next_page(NewSpacePage* page) {
- set_next_chunk(page);
- }
-
- inline NewSpacePage* prev_page() const {
- return static_cast<NewSpacePage*>(prev_chunk());
- }
-
- inline void set_prev_page(NewSpacePage* page) {
- set_prev_chunk(page);
- }
-
- SemiSpace* semi_space() {
- return reinterpret_cast<SemiSpace*>(owner());
- }
-
- bool is_anchor() { return !this->InNewSpace(); }
-
- static bool IsAtStart(Address addr) {
- return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
- == kObjectStartOffset;
- }
-
- static bool IsAtEnd(Address addr) {
- return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
- }
-
- Address address() {
- return reinterpret_cast<Address>(this);
- }
-
- // Finds the NewSpacePage containg the given address.
- static inline NewSpacePage* FromAddress(Address address_in_page) {
- Address page_start =
- reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
- ~Page::kPageAlignmentMask);
- NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
- ASSERT(page->InNewSpace());
- return page;
- }
-
- // Find the page for a limit address. A limit address is either an address
- // inside a page, or the address right after the last byte of a page.
- static inline NewSpacePage* FromLimit(Address address_limit) {
- return NewSpacePage::FromAddress(address_limit - 1);
- }
-
- private:
- // Create a NewSpacePage object that is only used as anchor
- // for the doubly-linked list of real pages.
- explicit NewSpacePage(SemiSpace* owner) {
- InitializeAsAnchor(owner);
- }
-
- static NewSpacePage* Initialize(Heap* heap,
- Address start,
- SemiSpace* semi_space);
-
- // Intialize a fake NewSpacePage used as sentinel at the ends
- // of a doubly-linked list of real NewSpacePages.
- // Only uses the prev/next links, and sets flags to not be in new-space.
- void InitializeAsAnchor(SemiSpace* owner);
-
- friend class SemiSpace;
- friend class SemiSpaceIterator;
-};
-
-
// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
-// A semispace is a contiguous chunk of memory holding page-like memory
-// chunks. The mark-compact collector uses the memory of the first page in
-// the from space as a marking stack when tracing live objects.
+// A semispace is a contiguous chunk of memory. The mark-compact collector
+// uses the memory in the from space as a marking stack when tracing live
+// objects.
class SemiSpace : public Space {
public:
// Constructor.
- SemiSpace(Heap* heap, SemiSpaceId semispace)
- : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- start_(NULL),
- age_mark_(NULL),
- id_(semispace),
- anchor_(this),
- current_page_(NULL) { }
+ explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
+ start_ = NULL;
+ age_mark_ = NULL;
+ }
// Sets up the semispace using the given chunk.
bool Setup(Address start, int initial_capacity, int maximum_capacity);
@@ -1800,9 +1301,14 @@ class SemiSpace : public Space {
// True if the space has been set up but not torn down.
bool HasBeenSetup() { return start_ != NULL; }
+ // Grow the size of the semispace by committing extra virtual memory.
+ // Assumes that the caller has checked that the semispace has not reached
+ // its maximum capacity (and thus there is space available in the reserved
+ // address range to grow).
+ bool Grow();
+
// Grow the semispace to the new capacity. The new capacity
- // requested must be larger than the current capacity and less than
- // the maximum capacity.
+ // requested must be larger than the current capacity.
bool GrowTo(int new_capacity);
// Shrinks the semispace to the new capacity. The new capacity
@@ -1810,41 +1316,14 @@ class SemiSpace : public Space {
// semispace and less than the current capacity.
bool ShrinkTo(int new_capacity);
- // Returns the start address of the first page of the space.
- Address space_start() {
- ASSERT(anchor_.next_page() != &anchor_);
- return anchor_.next_page()->body();
- }
-
- // Returns the start address of the current page of the space.
- Address page_low() {
- ASSERT(anchor_.next_page() != &anchor_);
- return current_page_->body();
- }
-
+ // Returns the start address of the space.
+ Address low() { return start_; }
// Returns one past the end address of the space.
- Address space_end() {
- return anchor_.prev_page()->body_limit();
- }
-
- // Returns one past the end address of the current page of the space.
- Address page_high() {
- return current_page_->body_limit();
- }
-
- bool AdvancePage() {
- NewSpacePage* next_page = current_page_->next_page();
- if (next_page == anchor()) return false;
- current_page_ = next_page;
- return true;
- }
-
- // Resets the space to using the first page.
- void Reset();
+ Address high() { return low() + capacity_; }
// Age mark accessors.
Address age_mark() { return age_mark_; }
- void set_age_mark(Address mark);
+ void set_age_mark(Address mark) { age_mark_ = mark; }
// True if the address is in the address range of this semispace (not
// necessarily below the allocation pointer).
@@ -1859,6 +1338,11 @@ class SemiSpace : public Space {
return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
}
+ // The offset of an address from the beginning of the space.
+ int SpaceOffsetForAddress(Address addr) {
+ return static_cast<int>(addr - low());
+ }
+
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called.
virtual intptr_t Size() {
@@ -1875,19 +1359,9 @@ class SemiSpace : public Space {
bool Commit();
bool Uncommit();
- NewSpacePage* first_page() { return anchor_.next_page(); }
- NewSpacePage* current_page() { return current_page_; }
-
#ifdef DEBUG
virtual void Print();
virtual void Verify();
- // Validate a range of of addresses in a SemiSpace.
- // The "from" address must be on a page prior to the "to" address,
- // in the linked page order, or it must be earlier on the same page.
- static void AssertValidRange(Address from, Address to);
-#else
- // Do nothing.
- inline static void AssertValidRange(Address from, Address to) {}
#endif
// Returns the current capacity of the semi space.
@@ -1899,17 +1373,7 @@ class SemiSpace : public Space {
// Returns the initial capacity of the semi space.
int InitialCapacity() { return initial_capacity_; }
- SemiSpaceId id() { return id_; }
-
- static void Swap(SemiSpace* from, SemiSpace* to);
-
private:
- // Flips the semispace between being from-space and to-space.
- // Copies the flags into the masked positions on all pages in the space.
- void FlipPages(intptr_t flags, intptr_t flag_mask);
-
- NewSpacePage* anchor() { return &anchor_; }
-
// The current and maximum capacity of the space.
int capacity_;
int maximum_capacity_;
@@ -1926,13 +1390,7 @@ class SemiSpace : public Space {
uintptr_t object_expected_;
bool committed_;
- SemiSpaceId id_;
- NewSpacePage anchor_;
- NewSpacePage* current_page_;
-
- friend class SemiSpaceIterator;
- friend class NewSpacePageIterator;
public:
TRACK_MEMORY("SemiSpace")
};
@@ -1948,26 +1406,12 @@ class SemiSpaceIterator : public ObjectIterator {
// Create an iterator over the objects in the given space. If no start
// address is given, the iterator starts from the bottom of the space. If
// no size function is given, the iterator calls Object::Size().
-
- // Iterate over all of allocated to-space.
explicit SemiSpaceIterator(NewSpace* space);
- // Iterate over all of allocated to-space, with a custome size function.
SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
- // Iterate over part of allocated to-space, from start to the end
- // of allocation.
SemiSpaceIterator(NewSpace* space, Address start);
- // Iterate from one address to another in the same semi-space.
- SemiSpaceIterator(Address from, Address to);
- HeapObject* Next() {
+ HeapObject* next() {
if (current_ == limit_) return NULL;
- if (NewSpacePage::IsAtEnd(current_)) {
- NewSpacePage* page = NewSpacePage::FromLimit(current_);
- page = page->next_page();
- ASSERT(!page->is_anchor());
- current_ = page->body();
- if (current_ == limit_) return NULL;
- }
HeapObject* object = HeapObject::FromAddress(current_);
int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -1977,13 +1421,14 @@ class SemiSpaceIterator : public ObjectIterator {
}
// Implementation of the ObjectIterator functions.
- virtual HeapObject* next_object() { return Next(); }
+ virtual HeapObject* next_object() { return next(); }
private:
- void Initialize(Address start,
- Address end,
+ void Initialize(NewSpace* space, Address start, Address end,
HeapObjectCallback size_func);
+ // The semispace.
+ SemiSpace* space_;
// The current iteration point.
Address current_;
// The end of iteration.
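A minimal usage sketch of the renamed iterator interface (illustrative only, not part of the patch; 'new_space' and 'ProcessObject' are assumed to be supplied by the caller):

    SemiSpaceIterator it(new_space);
    for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
      // Objects come back in allocation order, up to the allocation top.
      ProcessObject(object);  // hypothetical callback
    }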
@@ -1994,34 +1439,6 @@ class SemiSpaceIterator : public ObjectIterator {
// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a semi-space.
-class NewSpacePageIterator BASE_EMBEDDED {
- public:
- // Make an iterator that runs over all pages in to-space.
- explicit inline NewSpacePageIterator(NewSpace* space);
-
- // Make an iterator that runs over all pages in the given semispace,
- // even those not used in allocation.
- explicit inline NewSpacePageIterator(SemiSpace* space);
-
- // Make iterator that iterates from the page containing start
- // to the page that contains limit in the same semispace.
- inline NewSpacePageIterator(Address start, Address limit);
-
- inline bool has_next();
- inline NewSpacePage* next();
-
- private:
- NewSpacePage* prev_page_; // Previous page returned.
- // Next page that will be returned. Cached here so that we can use this
- // iterator for operations that deallocate pages.
- NewSpacePage* next_page_;
- // Last page returned.
- NewSpacePage* last_page_;
-};
-
-
-// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
@@ -2032,13 +1449,11 @@ class NewSpace : public Space {
// Constructor.
explicit NewSpace(Heap* heap)
: Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- to_space_(heap, kToSpace),
- from_space_(heap, kFromSpace),
- reservation_(),
- inline_allocation_limit_step_(0) {}
+ to_space_(heap),
+ from_space_(heap) {}
// Sets up the new space using the given chunk.
- bool Setup(int reserved_semispace_size_, int max_semispace_size);
+ bool Setup(Address start, int size);
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
@@ -2065,30 +1480,18 @@ class NewSpace : public Space {
return (reinterpret_cast<uintptr_t>(a) & address_mask_)
== reinterpret_cast<uintptr_t>(start_);
}
-
bool Contains(Object* o) {
- Address a = reinterpret_cast<Address>(o);
- return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
+ return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
}
// Return the allocated bytes in the active semispace.
- virtual intptr_t Size() {
- return pages_used_ * Page::kObjectAreaSize +
- static_cast<int>(top() - to_space_.page_low());
- }
-
+ virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
// The same, but returning an int. We have to have the one that returns
// intptr_t because it is inherited, but if we know we are dealing with the
// new space, which can't get as big as the other spaces, then this is useful:
int SizeAsInt() { return static_cast<int>(Size()); }
// Return the current capacity of a semispace.
- intptr_t EffectiveCapacity() {
- ASSERT(to_space_.Capacity() == from_space_.Capacity());
- return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
- }
-
- // Return the current capacity of a semispace.
intptr_t Capacity() {
ASSERT(to_space_.Capacity() == from_space_.Capacity());
return to_space_.Capacity();
@@ -2100,11 +1503,8 @@ class NewSpace : public Space {
return Capacity();
}
- // Return the available bytes without growing or switching page in the
- // active semispace.
- intptr_t Available() {
- return allocation_info_.limit - allocation_info_.top;
- }
+ // Return the available bytes without growing in the active semispace.
+ intptr_t Available() { return Capacity() - Size(); }
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
@@ -2119,12 +1519,9 @@ class NewSpace : public Space {
}
// Return the address of the allocation pointer in the active semispace.
- Address top() {
- ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
- return allocation_info_.top;
- }
+ Address top() { return allocation_info_.top; }
// Return the address of the first object in the active semispace.
- Address bottom() { return to_space_.space_start(); }
+ Address bottom() { return to_space_.low(); }
// Get the age mark of the inactive semispace.
Address age_mark() { return from_space_.age_mark(); }
@@ -2136,70 +1533,54 @@ class NewSpace : public Space {
Address start() { return start_; }
uintptr_t mask() { return address_mask_; }
- INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
- ASSERT(Contains(addr));
- ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
- IsAligned(OffsetFrom(addr) - 1, kPointerSize));
- return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
- }
-
- INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
- return reinterpret_cast<Address>(index << kPointerSizeLog2);
- }
-
// The allocation top and limit addresses.
Address* allocation_top_address() { return &allocation_info_.top; }
Address* allocation_limit_address() { return &allocation_info_.limit; }
MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
- return AllocateRawInternal(size_in_bytes);
+ return AllocateRawInternal(size_in_bytes, &allocation_info_);
+ }
+
+ // Allocate the requested number of bytes for relocation during mark-compact
+ // collection.
+ MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
+ return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
}
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
+ // Reset the relocation pointer to the bottom of the inactive semispace in
+ // preparation for mark-compact collection.
+ void MCResetRelocationInfo();
+ // Update the allocation pointer in the active semispace after a
+ // mark-compact collection.
+ void MCCommitRelocationInfo();
- void LowerInlineAllocationLimit(intptr_t step) {
- inline_allocation_limit_step_ = step;
- if (step == 0) {
- allocation_info_.limit = to_space_.page_high();
- } else {
- allocation_info_.limit = Min(
- allocation_info_.top + inline_allocation_limit_step_,
- allocation_info_.limit);
- }
- top_on_previous_step_ = allocation_info_.top;
- }
-
- // Get the extent of the inactive semispace (for use as a marking stack,
- // or to zap it). Notice: space-addresses are not necessarily on the
- // same page, so FromSpaceStart() might be above FromSpaceEnd().
- Address FromSpacePageLow() { return from_space_.page_low(); }
- Address FromSpacePageHigh() { return from_space_.page_high(); }
- Address FromSpaceStart() { return from_space_.space_start(); }
- Address FromSpaceEnd() { return from_space_.space_end(); }
+ // Get the extent of the inactive semispace (for use as a marking stack).
+ Address FromSpaceLow() { return from_space_.low(); }
+ Address FromSpaceHigh() { return from_space_.high(); }
- // Get the extent of the active semispace's pages' memory.
- Address ToSpaceStart() { return to_space_.space_start(); }
- Address ToSpaceEnd() { return to_space_.space_end(); }
+ // Get the extent of the active semispace (to sweep newly copied objects
+ // during a scavenge collection).
+ Address ToSpaceLow() { return to_space_.low(); }
+ Address ToSpaceHigh() { return to_space_.high(); }
- inline bool ToSpaceContains(Address address) {
- return to_space_.Contains(address);
+ // Offsets from the beginning of the semispaces.
+ int ToSpaceOffsetForAddress(Address a) {
+ return to_space_.SpaceOffsetForAddress(a);
}
- inline bool FromSpaceContains(Address address) {
- return from_space_.Contains(address);
+ int FromSpaceOffsetForAddress(Address a) {
+ return from_space_.SpaceOffsetForAddress(a);
}
// True if the object is a heap object in the address range of the
// respective semispace (not necessarily below the allocation pointer of the
// semispace).
- inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
- inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+ bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+ bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
- // Try to switch the active semispace to a new, empty, page.
- // Returns false if this isn't possible or reasonable (i.e., there
- // are no pages, or the current page is already empty), or true
- // if successful.
- bool AddFreshPage();
+ bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
+ bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
virtual bool ReserveSpace(int bytes);
@@ -2239,24 +1620,10 @@ class NewSpace : public Space {
return from_space_.Uncommit();
}
- inline intptr_t inline_allocation_limit_step() {
- return inline_allocation_limit_step_;
- }
-
- SemiSpace* active_space() { return &to_space_; }
-
private:
- // Update allocation info to match the current to-space page.
- void UpdateAllocationInfo();
-
- Address chunk_base_;
- uintptr_t chunk_size_;
-
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
- VirtualMemory reservation_;
- int pages_used_;
// Start address and bit mask for containment testing.
Address start_;
@@ -2267,20 +1634,15 @@ class NewSpace : public Space {
// Allocation pointer and limit for normal allocation and allocation during
// mark-compact collection.
AllocationInfo allocation_info_;
-
- // When incremental marking is active we will set allocation_info_.limit
- // to be lower than actual limit and then will gradually increase it
- // in steps to guarantee that we do incremental marking steps even
- // when all allocation is performed from inlined generated code.
- intptr_t inline_allocation_limit_step_;
-
- Address top_on_previous_step_;
+ AllocationInfo mc_forwarding_info_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
- // Implementation of AllocateRaw.
- MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(int size_in_bytes);
+ // Implementation of AllocateRaw and MCAllocateRaw.
+ MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
+ int size_in_bytes,
+ AllocationInfo* alloc_info);
friend class SemiSpaceIterator;
@@ -2290,6 +1652,193 @@ class NewSpace : public Space {
// -----------------------------------------------------------------------------
+// Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap. They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object). They have a size and a next pointer. The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode: public HeapObject {
+ public:
+ // Obtain a free-list node from a raw address. This is not a cast because
+ // it does not check nor require that the first word at the address is a map
+ // pointer.
+ static FreeListNode* FromAddress(Address address) {
+ return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+ }
+
+ static inline bool IsFreeListNode(HeapObject* object);
+
+ // Set the size in bytes, which can be read with HeapObject::Size(). This
+ // function also writes a map to the first word of the block so that it
+ // looks like a heap object to the garbage collector and heap iteration
+ // functions.
+ void set_size(Heap* heap, int size_in_bytes);
+
+ // Accessors for the next field.
+ inline Address next(Heap* heap);
+ inline void set_next(Heap* heap, Address next);
+
+ private:
+ static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
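A short sketch of how a freed block might be threaded onto a list with the interface above (illustrative only; 'heap', 'start', 'size_in_bytes' and 'free_list_head' are assumed to come from the caller):

    FreeListNode* node = FreeListNode::FromAddress(start);
    node->set_size(heap, size_in_bytes);   // writes a map so the GC sees a valid object
    node->set_next(heap, free_list_head);  // raw address of the previous list head
    free_list_head = start;                // the block is now the head of the list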
+
+
+// The free list for the old space.
+class OldSpaceFreeList BASE_EMBEDDED {
+ public:
+ OldSpaceFreeList(Heap* heap, AllocationSpace owner);
+
+ // Clear the free list.
+ void Reset();
+
+ // Return the number of bytes available on the free list.
+ intptr_t available() { return available_; }
+
+ // Place a node on the free list. The block of size 'size_in_bytes'
+ // starting at 'start' is placed on the free list. The return value is the
+ // number of bytes that have been lost due to internal fragmentation by
+ // freeing the block. Bookkeeping information will be written to the block,
+ // ie, its contents will be destroyed. The start address should be word
+ // aligned, and the size should be a non-zero multiple of the word size.
+ int Free(Address start, int size_in_bytes);
+
+ // Allocate a block of size 'size_in_bytes' from the free list. The block
+ // is uninitialized. A failure is returned if no block is available. The
+ // number of bytes lost to fragmentation is returned in the output parameter
+ // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
+ MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
+
+ void MarkNodes();
+
+ private:
+ // The size range of blocks, in bytes. (Smaller allocations are allowed, but
+ // will always result in waste.)
+ static const int kMinBlockSize = 2 * kPointerSize;
+ static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+
+ Heap* heap_;
+
+ // The identity of the owning space, for building allocation Failure
+ // objects.
+ AllocationSpace owner_;
+
+ // Total available bytes in all blocks on this free list.
+ int available_;
+
+ // Blocks are put on exact free lists in an array, indexed by size in words.
+ // The available sizes are kept in an increasingly ordered list. Entries
+ // corresponding to sizes < kMinBlockSize always have an empty free list
+ // (but index kHead is used for the head of the size list).
+ struct SizeNode {
+ // Address of the head FreeListNode of the implied block size or NULL.
+ Address head_node_;
+ // Size (words) of the next larger available size if head_node_ != NULL.
+ int next_size_;
+ };
+ static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
+ SizeNode free_[kFreeListsLength];
+
+ // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
+ static const int kHead = kMinBlockSize / kPointerSize - 1;
+ static const int kEnd = kMaxInt;
+
+ // We keep a "finger" in the size list to speed up a common pattern:
+ // repeated requests for the same or increasing sizes.
+ int finger_;
+
+ // Starting from *prev, find and return the smallest size >= index (words),
+ // or kEnd. Update *prev to be the largest size < index, or kHead.
+ int FindSize(int index, int* prev) {
+ int cur = free_[*prev].next_size_;
+ while (cur < index) {
+ *prev = cur;
+ cur = free_[cur].next_size_;
+ }
+ return cur;
+ }
+
+ // Remove an existing element from the size list.
+ void RemoveSize(int index) {
+ int prev = kHead;
+ int cur = FindSize(index, &prev);
+ ASSERT(cur == index);
+ free_[prev].next_size_ = free_[cur].next_size_;
+ finger_ = prev;
+ }
+
+ // Insert a new element into the size list.
+ void InsertSize(int index) {
+ int prev = kHead;
+ int cur = FindSize(index, &prev);
+ ASSERT(cur != index);
+ free_[prev].next_size_ = index;
+ free_[index].next_size_ = cur;
+ }
+
+ // The size list is not updated during a sequence of calls to Free, but is
+ // rebuilt before the next allocation.
+ void RebuildSizeList();
+ bool needs_rebuild_;
+
+#ifdef DEBUG
+ // Does this free list contain a free block located at the address of 'node'?
+ bool Contains(FreeListNode* node);
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
+};
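The exact-size lists above thread the populated size classes through next_size_, ordered by size, with kHead and kEnd as sentinels. The standalone toy below (made-up sizes and table length, no V8 types) shows how the FindSize walk and InsertSize splice work:

    #include <cstdio>

    const int kHead = 1;        // hypothetical sentinel: smallest block is 2 words
    const int kEnd  = 1 << 30;  // hypothetical "no larger size" sentinel
    const int kLen  = 64;
    int next_size_[kLen];       // next larger populated size class, or kEnd

    void InsertSize(int index) {
      int prev = kHead;
      int cur = next_size_[prev];
      while (cur < index) { prev = cur; cur = next_size_[cur]; }  // FindSize walk
      next_size_[prev]  = index;                                  // splice the new size in
      next_size_[index] = cur;
    }

    int main() {
      for (int i = 0; i < kLen; i++) next_size_[i] = kEnd;
      InsertSize(8);  // an 8-word block was freed
      InsertSize(3);  // then a 3-word block
      int cur = next_size_[kHead];           // smallest populated size class
      while (cur < 4) cur = next_size_[cur];
      std::printf("best fit for a 4-word request: %d words\n", cur);  // prints 8
      return 0;
    }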
+
+
+// The free list for the map space.
+class FixedSizeFreeList BASE_EMBEDDED {
+ public:
+ FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
+
+ // Clear the free list.
+ void Reset();
+
+ // Return the number of bytes available on the free list.
+ intptr_t available() { return available_; }
+
+ // Place a node on the free list. The block starting at 'start' (assumed to
+ // have size object_size_) is placed on the free list. Bookkeeping
+ // information will be written to the block, i.e., its contents will be
+ // destroyed. The start address should be word aligned.
+ void Free(Address start);
+
+ // Allocate a fixed-size block from the free list. The block is uninitialized.
+ // A failure is returned if no block is available.
+ MUST_USE_RESULT MaybeObject* Allocate();
+
+ void MarkNodes();
+
+ private:
+ Heap* heap_;
+
+ // Available bytes on the free list.
+ intptr_t available_;
+
+ // The head of the free list.
+ Address head_;
+
+ // The tail of the free list.
+ Address tail_;
+
+ // The identity of the owning space, for building allocation Failure
+ // objects.
+ AllocationSpace owner_;
+
+ // The size of the objects in this space.
+ int object_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
+};
+
+
+// -----------------------------------------------------------------------------
// Old object space (excluding map objects)
class OldSpace : public PagedSpace {
@@ -2300,28 +1849,71 @@ class OldSpace : public PagedSpace {
intptr_t max_capacity,
AllocationSpace id,
Executability executable)
- : PagedSpace(heap, max_capacity, id, executable) {
+ : PagedSpace(heap, max_capacity, id, executable),
+ free_list_(heap, id) {
page_extra_ = 0;
}
+ // The bytes available on the free list (i.e., not above the linear allocation
+ // pointer).
+ intptr_t AvailableFree() { return free_list_.available(); }
+
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
return page->ObjectAreaEnd();
}
+ // Give a block of memory to the space's free list. It might be added to
+ // the free list or accounted as waste.
+ // If add_to_freelist is false, only the accounting stats are updated and
+ // no attempt is made to add the area to the free list.
+ void Free(Address start, int size_in_bytes, bool add_to_freelist) {
+ accounting_stats_.DeallocateBytes(size_in_bytes);
+
+ if (add_to_freelist) {
+ int wasted_bytes = free_list_.Free(start, size_in_bytes);
+ accounting_stats_.WasteBytes(wasted_bytes);
+ }
+ }
+
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
+
+ // Prepare for full garbage collection. Resets the relocation pointer and
+ // clears the free list.
+ virtual void PrepareForMarkCompact(bool will_compact);
+
+ // Updates the allocation pointer to the relocation top after a mark-compact
+ // collection.
+ virtual void MCCommitRelocationInfo();
+
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+
+ void MarkFreeListNodes() { free_list_.MarkNodes(); }
+
+#ifdef DEBUG
+ // Reports statistics for the space
+ void ReportStatistics();
+#endif
+
+ protected:
+ // Virtual function in the superclass. Slow path of AllocateRaw.
+ MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+ // Virtual function in the superclass. Allocate linearly at the start of
+ // the page after current_page (there is assumed to be one).
+ HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+
+ private:
+ // The space's free list.
+ OldSpaceFreeList free_list_;
+
public:
TRACK_MEMORY("OldSpace")
};
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
- ASSERT((space).page_low() <= (info).top \
- && (info).top <= (space).page_high() \
- && (info).limit <= (space).page_high())
-
-
// -----------------------------------------------------------------------------
// Old space for objects of a fixed size
@@ -2334,7 +1926,8 @@ class FixedSpace : public PagedSpace {
const char* name)
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
- name_(name) {
+ name_(name),
+ free_list_(heap, id, object_size_in_bytes) {
page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
}
@@ -2345,12 +1938,44 @@ class FixedSpace : public PagedSpace {
int object_size_in_bytes() { return object_size_in_bytes_; }
+ // Give a fixed sized block of memory to the space's free list.
+ // If add_to_freelist is false, only the accounting stats are updated and
+ // no attempt is made to add the area to the free list.
+ void Free(Address start, bool add_to_freelist) {
+ if (add_to_freelist) {
+ free_list_.Free(start);
+ }
+ accounting_stats_.DeallocateBytes(object_size_in_bytes_);
+ }
+
// Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact();
+ virtual void PrepareForMarkCompact(bool will_compact);
+
+ // Updates the allocation pointer to the relocation top after a mark-compact
+ // collection.
+ virtual void MCCommitRelocationInfo();
+
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
void MarkFreeListNodes() { free_list_.MarkNodes(); }
+#ifdef DEBUG
+ // Reports statistics for the space
+ void ReportStatistics();
+#endif
+
protected:
+ // Virtual function in the superclass. Slow path of AllocateRaw.
+ MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+ // Virtual function in the superclass. Allocate linearly at the start of
+ // the page after current_page (there is assumed to be one).
+ HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+
void ResetFreeList() {
free_list_.Reset();
}
@@ -2361,6 +1986,9 @@ class FixedSpace : public PagedSpace {
// The name of this space.
const char* name_;
+
+ // The space's free list.
+ FixedSizeFreeList free_list_;
};
@@ -2376,18 +2004,83 @@ class MapSpace : public FixedSpace {
AllocationSpace id)
: FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
max_map_space_pages_(max_map_space_pages) {
+ ASSERT(max_map_space_pages < kMaxMapPageIndex);
}
+ // Prepares for a mark-compact GC.
+ virtual void PrepareForMarkCompact(bool will_compact);
+
// Given an index, returns the page address.
- // TODO(1600): this limit is artificial just to keep code compilable
- static const int kMaxMapPageIndex = 1 << 16;
-
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (IsPowerOf2(Map::kSize)) {
- return RoundDown(size, Map::kSize);
- } else {
- return (size / Map::kSize) * Map::kSize;
+ Address PageAddress(int page_index) { return page_addresses_[page_index]; }
+
+ static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
+
+ // Are map pointers encodable into map word?
+ bool MapPointersEncodable() {
+ if (!FLAG_use_big_map_space) {
+ ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
+ return true;
}
+ return CountPagesToTop() <= max_map_space_pages_;
+ }
+
+ // Should be called after forced sweep to find out if map space needs
+ // compaction.
+ bool NeedsCompaction(int live_maps) {
+ return !MapPointersEncodable() && live_maps <= CompactionThreshold();
+ }
+
+ Address TopAfterCompaction(int live_maps) {
+ ASSERT(NeedsCompaction(live_maps));
+
+ int pages_left = live_maps / kMapsPerPage;
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ while (pages_left-- > 0) {
+ ASSERT(it.has_next());
+ it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ }
+ ASSERT(it.has_next());
+ Page* top_page = it.next();
+ top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ ASSERT(top_page->is_valid());
+
+ int offset = live_maps % kMapsPerPage * Map::kSize;
+ Address top = top_page->ObjectAreaStart() + offset;
+ ASSERT(top < top_page->ObjectAreaEnd());
+ ASSERT(Contains(top));
+
+ return top;
+ }
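A worked example of the TopAfterCompaction() arithmetic above, with invented numbers (the real kMapsPerPage and Map::kSize depend on the build):

    // Suppose kMapsPerPage = 64 and Map::kSize = 128 bytes, with 200 live maps:
    //   pages_left = 200 / 64         = 3 pages stay fully populated;
    //   offset     = (200 % 64) * 128 = 8 * 128 = 1024 bytes into the next page;
    // so the compacted top is that page's ObjectAreaStart() + 1024.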
+
+ void FinishCompaction(Address new_top, int live_maps) {
+ Page* top_page = Page::FromAddress(new_top);
+ ASSERT(top_page->is_valid());
+
+ SetAllocationInfo(&allocation_info_, top_page);
+ allocation_info_.top = new_top;
+
+ int new_size = live_maps * Map::kSize;
+ accounting_stats_.DeallocateBytes(accounting_stats_.Size());
+ accounting_stats_.AllocateBytes(new_size);
+
+ // Flush allocation watermarks.
+ for (Page* p = first_page_; p != top_page; p = p->next_page()) {
+ p->SetAllocationWatermark(p->AllocationTop());
+ }
+ top_page->SetAllocationWatermark(new_top);
+
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ intptr_t actual_size = 0;
+ for (Page* p = first_page_; p != top_page; p = p->next_page())
+ actual_size += kMapsPerPage * Map::kSize;
+ actual_size += (new_top - top_page->ObjectAreaStart());
+ ASSERT(accounting_stats_.Size() == actual_size);
+ }
+#endif
+
+ Shrink();
+ ResetFreeList();
}
protected:
@@ -2405,6 +2098,9 @@ class MapSpace : public FixedSpace {
const int max_map_space_pages_;
+ // An array of page start address in a map space.
+ Address page_addresses_[kMaxMapPageIndex];
+
public:
TRACK_MEMORY("MapSpace")
};
@@ -2420,14 +2116,6 @@ class CellSpace : public FixedSpace {
: FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
{}
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
- return RoundDown(size, JSGlobalPropertyCell::kSize);
- } else {
- return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
- }
- }
-
protected:
#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
@@ -2445,6 +2133,64 @@ class CellSpace : public FixedSpace {
// A large object always starts at Page::kObjectStartOffset within a page.
// Large objects do not move during garbage collections.
+// A LargeObjectChunk holds exactly one large object page with exactly one
+// large object.
+class LargeObjectChunk {
+ public:
+ // Allocates a new LargeObjectChunk that contains a large object page
+ // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
+ // object) bytes after the object area start of that page.
+ static LargeObjectChunk* New(int size_in_bytes, Executability executable);
+
+ // Free the memory associated with the chunk.
+ void Free(Executability executable);
+
+ // Interpret a raw address as a large object chunk.
+ static LargeObjectChunk* FromAddress(Address address) {
+ return reinterpret_cast<LargeObjectChunk*>(address);
+ }
+
+ // Returns the address of this chunk.
+ Address address() { return reinterpret_cast<Address>(this); }
+
+ Page* GetPage() {
+ return Page::FromAddress(RoundUp(address(), Page::kPageSize));
+ }
+
+ // Accessors for the fields of the chunk.
+ LargeObjectChunk* next() { return next_; }
+ void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
+ size_t size() { return size_ & ~Page::kPageFlagMask; }
+
+ // Compute the start address in the chunk.
+ Address GetStartAddress() { return GetPage()->ObjectAreaStart(); }
+
+ // Returns the object in this chunk.
+ HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
+
+ // Given a requested size, returns the physical size of a chunk to be
+ // allocated.
+ static int ChunkSizeFor(int size_in_bytes);
+
+ // Given a chunk size, returns the object size it can accommodate. Used by
+ // LargeObjectSpace::Available.
+ static intptr_t ObjectSizeFor(intptr_t chunk_size) {
+ if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
+ return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
+ }
+
+ private:
+ // A pointer to the next large object chunk in the space or NULL.
+ LargeObjectChunk* next_;
+
+ // The total size of this chunk.
+ size_t size_;
+
+ public:
+ TRACK_MEMORY("LargeObjectChunk")
+};
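A small standalone illustration of the chunk arithmetic declared above (the constants here are stand-ins, not the real Page values): a chunk is padded so that one page-aligned large object page fits inside it, and ObjectSizeFor() undoes that padding.

    #include <cstdio>
    #include <stdint.h>

    const intptr_t kPageSize = 1 << 20;       // stand-in for Page::kPageSize
    const intptr_t kObjectStartOffset = 256;  // stand-in for Page::kObjectStartOffset

    // Mirrors LargeObjectChunk::ObjectSizeFor() from the header above.
    intptr_t ObjectSizeFor(intptr_t chunk_size) {
      if (chunk_size <= (kPageSize + kObjectStartOffset)) return 0;
      return chunk_size - kPageSize - kObjectStartOffset;
    }

    // Mirrors the RoundUp() used by GetPage(): the page inside a chunk starts at
    // the first kPageSize-aligned address at or after the chunk itself.
    intptr_t RoundUpToPage(intptr_t addr) {
      return (addr + kPageSize - 1) & ~(kPageSize - 1);
    }

    int main() {
      intptr_t chunk_size = 3 * kPageSize;
      std::printf("a %ld-byte chunk holds at most a %ld-byte object\n",
                  static_cast<long>(chunk_size),
                  static_cast<long>(ObjectSizeFor(chunk_size)));
      std::printf("the page for a chunk at 0x180100 starts at 0x%lx\n",
                  static_cast<unsigned long>(RoundUpToPage(0x180100)));
      return 0;
    }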
+
+
class LargeObjectSpace : public Space {
public:
LargeObjectSpace(Heap* heap, AllocationSpace id);
@@ -2456,15 +2202,12 @@ class LargeObjectSpace : public Space {
// Releases internal resources, frees objects in this space.
void TearDown();
- static intptr_t ObjectSizeFor(intptr_t chunk_size) {
- if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
- return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
- }
-
- // Shared implementation of AllocateRaw, AllocateRawCode and
- // AllocateRawFixedArray.
- MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
- Executability executable);
+ // Allocates a (non-FixedArray, non-Code) large object.
+ MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
+ // Allocates a large Code object.
+ MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
+ // Allocates a large FixedArray.
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
// Available bytes for objects in this space.
inline intptr_t Available();
@@ -2488,7 +2231,10 @@ class LargeObjectSpace : public Space {
// Finds a large object page containing the given pc, returns NULL
// if such a page doesn't exist.
- LargePage* FindPageContainingPc(Address pc);
+ LargeObjectChunk* FindChunkContainingPc(Address pc);
+
+ // Iterates objects covered by dirty regions.
+ void IterateDirtyRegions(ObjectSlotCallback func);
// Frees unmarked objects.
void FreeUnmarkedObjects();
@@ -2497,15 +2243,13 @@ class LargeObjectSpace : public Space {
bool Contains(HeapObject* obj);
// Checks whether the space is empty.
- bool IsEmpty() { return first_page_ == NULL; }
+ bool IsEmpty() { return first_chunk_ == NULL; }
// See the comments for ReserveSpace in the Space class. This has to be
// called after ReserveSpace has been called on the paged spaces, since they
// may use some memory, leaving less for large objects.
virtual bool ReserveSpace(int bytes);
- LargePage* first_page() { return first_page_; }
-
#ifdef DEBUG
virtual void Verify();
virtual void Print();
@@ -2518,11 +2262,17 @@ class LargeObjectSpace : public Space {
private:
// The head of the linked list of large object chunks.
- LargePage* first_page_;
+ LargeObjectChunk* first_chunk_;
intptr_t size_; // allocated bytes
int page_count_; // number of chunks
intptr_t objects_size_; // size of objects
+ // Shared implementation of AllocateRaw, AllocateRawCode and
+ // AllocateRawFixedArray.
+ MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
+ int object_size,
+ Executability executable);
+
friend class LargeObjectIterator;
public:
@@ -2535,78 +2285,17 @@ class LargeObjectIterator: public ObjectIterator {
explicit LargeObjectIterator(LargeObjectSpace* space);
LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
- HeapObject* Next();
+ HeapObject* next();
// implementation of ObjectIterator.
- virtual HeapObject* next_object() { return Next(); }
+ virtual HeapObject* next_object() { return next(); }
private:
- LargePage* current_;
+ LargeObjectChunk* current_;
HeapObjectCallback size_func_;
};
-// Iterates over the chunks (pages and large object pages) that can contain
-// pointers to new space.
-class PointerChunkIterator BASE_EMBEDDED {
- public:
- inline explicit PointerChunkIterator(Heap* heap);
-
- // Return NULL when the iterator is done.
- MemoryChunk* next() {
- switch (state_) {
- case kOldPointerState: {
- if (old_pointer_iterator_.has_next()) {
- return old_pointer_iterator_.next();
- }
- state_ = kMapState;
- // Fall through.
- }
- case kMapState: {
- if (map_iterator_.has_next()) {
- return map_iterator_.next();
- }
- state_ = kLargeObjectState;
- // Fall through.
- }
- case kLargeObjectState: {
- HeapObject* heap_object;
- do {
- heap_object = lo_iterator_.Next();
- if (heap_object == NULL) {
- state_ = kFinishedState;
- return NULL;
- }
- // Fixed arrays are the only pointer-containing objects in large
- // object space.
- } while (!heap_object->IsFixedArray());
- MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
- return answer;
- }
- case kFinishedState:
- return NULL;
- default:
- break;
- }
- UNREACHABLE();
- return NULL;
- }
-
-
- private:
- enum State {
- kOldPointerState,
- kMapState,
- kLargeObjectState,
- kFinishedState
- };
- State state_;
- PageIterator old_pointer_iterator_;
- PageIterator map_iterator_;
- LargeObjectIterator lo_iterator_;
-};
-
-
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
diff --git a/deps/v8/src/splay-tree-inl.h b/deps/v8/src/splay-tree-inl.h
index 4640ed5b08..9c2287eab7 100644
--- a/deps/v8/src/splay-tree-inl.h
+++ b/deps/v8/src/splay-tree-inl.h
@@ -45,7 +45,7 @@ template<typename Config, class Allocator>
bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
if (is_empty()) {
// If the tree is empty, insert the new node.
- root_ = new Node(key, Config::NoValue());
+ root_ = new Node(key, Config::kNoValue);
} else {
// Splay on the key to move the last node on the search path
// for the key to the root of the tree.
@@ -57,7 +57,7 @@ bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
return false;
}
// Insert the new node.
- Node* node = new Node(key, Config::NoValue());
+ Node* node = new Node(key, Config::kNoValue);
InsertInternal(cmp, node);
}
locator->bind(root_);
@@ -226,7 +226,7 @@ template<typename Config, class Allocator>
void SplayTree<Config, Allocator>::Splay(const Key& key) {
if (is_empty())
return;
- Node dummy_node(Config::kNoKey, Config::NoValue());
+ Node dummy_node(Config::kNoKey, Config::kNoValue);
// Create a dummy node. The use of the dummy node is a bit
// counter-intuitive: The right child of the dummy node will hold
// the L tree of the algorithm. The left child of the dummy node
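The two hunks above replace the Config::NoValue() factory with a Config::kNoValue constant, so a tree configuration now carries its placeholders as plain static constants. A hypothetical Config compatible with the downgraded interface (the name IntSplayConfig and the int key/value types are invented for illustration):

    struct IntSplayConfig {
      typedef int Key;
      typedef int Value;
      static const Key kNoKey = -1;
      static const Value kNoValue = 0;   // stored by Insert() until the locator binds a value
      static int Compare(const Key& a, const Key& b) {
        return a < b ? -1 : (a > b ? 1 : 0);
      }
    };
    // A SplayTree<IntSplayConfig, SomeAllocator> instantiated with this Config
    // can then construct nodes and dummy nodes from kNoKey/kNoValue directly.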
diff --git a/deps/v8/src/store-buffer-inl.h b/deps/v8/src/store-buffer-inl.h
deleted file mode 100644
index 34f35a487f..0000000000
--- a/deps/v8/src/store-buffer-inl.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STORE_BUFFER_INL_H_
-#define V8_STORE_BUFFER_INL_H_
-
-#include "store-buffer.h"
-
-namespace v8 {
-namespace internal {
-
-Address StoreBuffer::TopAddress() {
- return reinterpret_cast<Address>(heap_->store_buffer_top_address());
-}
-
-
-void StoreBuffer::Mark(Address addr) {
- ASSERT(!heap_->cell_space()->Contains(addr));
- ASSERT(!heap_->code_space()->Contains(addr));
- Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
- *top++ = addr;
- heap_->public_set_store_buffer_top(top);
- if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
- ASSERT(top == limit_);
- Compact();
- } else {
- ASSERT(top < limit_);
- }
-}
-
-
-void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
- if (store_buffer_rebuilding_enabled_) {
- ASSERT(!heap_->cell_space()->Contains(addr));
- ASSERT(!heap_->code_space()->Contains(addr));
- ASSERT(!heap_->old_data_space()->Contains(addr));
- ASSERT(!heap_->new_space()->Contains(addr));
- Address* top = old_top_;
- *top++ = addr;
- old_top_ = top;
- old_buffer_is_sorted_ = false;
- old_buffer_is_filtered_ = false;
- if (top >= old_limit_) {
- ASSERT(callback_ != NULL);
- (*callback_)(heap_,
- MemoryChunk::FromAnyPointerAddress(addr),
- kStoreBufferFullEvent);
- }
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_STORE_BUFFER_INL_H_
diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc
deleted file mode 100644
index ab810e4006..0000000000
--- a/deps/v8/src/store-buffer.cc
+++ /dev/null
@@ -1,694 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "store-buffer.h"
-#include "store-buffer-inl.h"
-#include "v8-counters.h"
-
-namespace v8 {
-namespace internal {
-
-StoreBuffer::StoreBuffer(Heap* heap)
- : heap_(heap),
- start_(NULL),
- limit_(NULL),
- old_start_(NULL),
- old_limit_(NULL),
- old_top_(NULL),
- old_buffer_is_sorted_(false),
- old_buffer_is_filtered_(false),
- during_gc_(false),
- store_buffer_rebuilding_enabled_(false),
- callback_(NULL),
- may_move_store_buffer_entries_(true),
- virtual_memory_(NULL),
- hash_map_1_(NULL),
- hash_map_2_(NULL) {
-}
-
-
-void StoreBuffer::Setup() {
- virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
- uintptr_t start_as_int =
- reinterpret_cast<uintptr_t>(virtual_memory_->address());
- start_ =
- reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
- limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
-
- old_top_ = old_start_ = new Address[kOldStoreBufferLength];
- old_limit_ = old_start_ + kOldStoreBufferLength;
-
- ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
- ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
- Address* vm_limit = reinterpret_cast<Address*>(
- reinterpret_cast<char*>(virtual_memory_->address()) +
- virtual_memory_->size());
- ASSERT(start_ <= vm_limit);
- ASSERT(limit_ <= vm_limit);
- USE(vm_limit);
- ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
- ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
- 0);
-
- virtual_memory_->Commit(reinterpret_cast<Address>(start_),
- kStoreBufferSize,
- false); // Not executable.
- heap_->public_set_store_buffer_top(start_);
-
- hash_map_1_ = new uintptr_t[kHashMapLength];
- hash_map_2_ = new uintptr_t[kHashMapLength];
-
- ZapHashTables();
-}
-
-
-void StoreBuffer::TearDown() {
- delete virtual_memory_;
- delete[] hash_map_1_;
- delete[] hash_map_2_;
- delete[] old_start_;
- old_start_ = old_top_ = old_limit_ = NULL;
- start_ = limit_ = NULL;
- heap_->public_set_store_buffer_top(start_);
-}
-
-
-void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
- isolate->heap()->store_buffer()->Compact();
-}
-
-
-#if V8_TARGET_ARCH_X64
-static int CompareAddresses(const void* void_a, const void* void_b) {
- intptr_t a =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
- intptr_t b =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
- // Unfortunately if int is smaller than intptr_t there is no branch-free
- // way to return a number with the same sign as the difference between the
- // pointers.
- if (a == b) return 0;
- if (a < b) return -1;
- ASSERT(a > b);
- return 1;
-}
-#else
-static int CompareAddresses(const void* void_a, const void* void_b) {
- intptr_t a =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
- intptr_t b =
- reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
- ASSERT(sizeof(1) == sizeof(a));
- // Shift down to avoid wraparound.
- return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
-}
-#endif
-
-
-void StoreBuffer::Uniq() {
- ASSERT(HashTablesAreZapped());
- // Remove adjacent duplicates and cells that do not point at new space.
- Address previous = NULL;
- Address* write = old_start_;
- ASSERT(may_move_store_buffer_entries_);
- for (Address* read = old_start_; read < old_top_; read++) {
- Address current = *read;
- if (current != previous) {
- if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
- *write++ = current;
- }
- }
- previous = current;
- }
- old_top_ = write;
-}
-
-
-void StoreBuffer::HandleFullness() {
- if (old_buffer_is_filtered_) return;
- ASSERT(may_move_store_buffer_entries_);
- Compact();
-
- old_buffer_is_filtered_ = true;
- bool page_has_scan_on_scavenge_flag = false;
-
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
- }
-
- if (page_has_scan_on_scavenge_flag) {
- Filter(MemoryChunk::SCAN_ON_SCAVENGE);
- }
-
- // If filtering out the entries from scan_on_scavenge pages got us down to
- // less than half full, then we are satisfied with that.
- if (old_limit_ - old_top_ > old_top_ - old_start_) return;
-
- // Sample 1 entry in 97 and filter out the pages where we estimate that more
- // than 1 in 8 pointers are to new space.
- static const int kSampleFinenesses = 5;
- static const struct Samples {
- int prime_sample_step;
- int threshold;
- } samples[kSampleFinenesses] = {
- { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
- { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
- { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
- { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
- { 1, 0}
- };
- for (int i = kSampleFinenesses - 1; i >= 0; i--) {
- ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
- // As a last resort we mark all pages as being exempt from the store buffer.
- ASSERT(i != 0 || old_top_ == old_start_);
- if (old_limit_ - old_top_ > old_top_ - old_start_) return;
- }
- UNREACHABLE();
-}
-
-
-// Sample the store buffer to see if some pages are taking up a lot of space
-// in the store buffer.
-void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != NULL) {
- chunk->set_store_buffer_counter(0);
- }
- bool created_new_scan_on_scavenge_pages = false;
- MemoryChunk* previous_chunk = NULL;
- for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
- Address addr = *p;
- MemoryChunk* containing_chunk = NULL;
- if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
- containing_chunk = previous_chunk;
- } else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
- }
- int old_counter = containing_chunk->store_buffer_counter();
- if (old_counter == threshold) {
- containing_chunk->set_scan_on_scavenge(true);
- created_new_scan_on_scavenge_pages = true;
- }
- containing_chunk->set_store_buffer_counter(old_counter + 1);
- previous_chunk = containing_chunk;
- }
- if (created_new_scan_on_scavenge_pages) {
- Filter(MemoryChunk::SCAN_ON_SCAVENGE);
- }
- old_buffer_is_filtered_ = true;
-}
-
-
-void StoreBuffer::Filter(int flag) {
- Address* new_top = old_start_;
- MemoryChunk* previous_chunk = NULL;
- for (Address* p = old_start_; p < old_top_; p++) {
- Address addr = *p;
- MemoryChunk* containing_chunk = NULL;
- if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
- containing_chunk = previous_chunk;
- } else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
- previous_chunk = containing_chunk;
- }
- if (!containing_chunk->IsFlagSet(flag)) {
- *new_top++ = addr;
- }
- }
- old_top_ = new_top;
-}
-
-
-void StoreBuffer::SortUniq() {
- Compact();
- if (old_buffer_is_sorted_) return;
- ZapHashTables();
- qsort(reinterpret_cast<void*>(old_start_),
- old_top_ - old_start_,
- sizeof(*old_top_),
- &CompareAddresses);
- Uniq();
-
- old_buffer_is_sorted_ = true;
-}
-
-
-bool StoreBuffer::PrepareForIteration() {
- Compact();
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- bool page_has_scan_on_scavenge_flag = false;
- while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
- }
-
- if (page_has_scan_on_scavenge_flag) {
- Filter(MemoryChunk::SCAN_ON_SCAVENGE);
- }
- ZapHashTables();
- return page_has_scan_on_scavenge_flag;
-}
-
-
-#ifdef DEBUG
-void StoreBuffer::Clean() {
- ZapHashTables();
- Uniq(); // Also removes things that no longer point to new space.
- CheckForFullBuffer();
-}
-
-
-static bool Zapped(char* start, int size) {
- for (int i = 0; i < size; i++) {
- if (start[i] != 0) return false;
- }
- return true;
-}
-
-
-bool StoreBuffer::HashTablesAreZapped() {
- return Zapped(reinterpret_cast<char*>(hash_map_1_),
- sizeof(uintptr_t) * kHashMapLength) &&
- Zapped(reinterpret_cast<char*>(hash_map_2_),
- sizeof(uintptr_t) * kHashMapLength);
-}
-
-
-static Address* in_store_buffer_1_element_cache = NULL;
-
-
-bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
- if (!FLAG_enable_slow_asserts) return true;
- if (in_store_buffer_1_element_cache != NULL &&
- *in_store_buffer_1_element_cache == cell_address) {
- return true;
- }
- Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
- for (Address* current = top - 1; current >= start_; current--) {
- if (*current == cell_address) {
- in_store_buffer_1_element_cache = current;
- return true;
- }
- }
- for (Address* current = old_top_ - 1; current >= old_start_; current--) {
- if (*current == cell_address) {
- in_store_buffer_1_element_cache = current;
- return true;
- }
- }
- return false;
-}
-#endif
-
-
-void StoreBuffer::ZapHashTables() {
- memset(reinterpret_cast<void*>(hash_map_1_),
- 0,
- sizeof(uintptr_t) * kHashMapLength);
- memset(reinterpret_cast<void*>(hash_map_2_),
- 0,
- sizeof(uintptr_t) * kHashMapLength);
-}
-
-
-void StoreBuffer::GCPrologue() {
- ZapHashTables();
- during_gc_ = true;
-}
-
-
-#ifdef DEBUG
-static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
- // Do nothing.
-}
-
-
-void StoreBuffer::VerifyPointers(PagedSpace* space,
- RegionCallback region_callback) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* page = it.next();
- FindPointersToNewSpaceOnPage(
- reinterpret_cast<PagedSpace*>(page->owner()),
- page,
- region_callback,
- &DummyScavengePointer);
- }
-}
-
-
-void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
- LargeObjectIterator it(space);
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- if (object->IsFixedArray()) {
- Address slot_address = object->address();
- Address end = object->address() + object->Size();
-
- while (slot_address < end) {
- HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
- // When we are not in GC the Heap::InNewSpace() predicate
- // checks that pointers which satisfy predicate point into
- // the active semispace.
- heap_->InNewSpace(*slot);
- slot_address += kPointerSize;
- }
- }
- }
-}
-#endif
-
-
-void StoreBuffer::Verify() {
-#ifdef DEBUG
- VerifyPointers(heap_->old_pointer_space(),
- &StoreBuffer::FindPointersToNewSpaceInRegion);
- VerifyPointers(heap_->map_space(),
- &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
- VerifyPointers(heap_->lo_space());
-#endif
-}
-
-
-void StoreBuffer::GCEpilogue() {
- during_gc_ = false;
- Verify();
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInRegion(
- Address start, Address end, ObjectSlotCallback slot_callback) {
- for (Address slot_address = start;
- slot_address < end;
- slot_address += kPointerSize) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- if (heap_->InNewSpace(*slot)) {
- HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
- ASSERT(object->IsHeapObject());
- slot_callback(reinterpret_cast<HeapObject**>(slot), object);
- if (heap_->InNewSpace(*slot)) {
- EnterDirectlyIntoStoreBuffer(slot_address);
- }
- }
- }
-}
-
-
-// Compute start address of the first map following given addr.
-static inline Address MapStartAlign(Address addr) {
- Address page = Page::FromAddress(addr)->ObjectAreaStart();
- return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
-}
-
-
-// Compute end address of the first map preceding given addr.
-static inline Address MapEndAlign(Address addr) {
- Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
- return page + ((addr - page) / Map::kSize * Map::kSize);
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInMaps(
- Address start,
- Address end,
- ObjectSlotCallback slot_callback) {
- ASSERT(MapStartAlign(start) == start);
- ASSERT(MapEndAlign(end) == end);
-
- Address map_address = start;
- while (map_address < end) {
- ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
- ASSERT(Memory::Object_at(map_address)->IsMap());
-
- Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
- Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
-
- FindPointersToNewSpaceInRegion(pointer_fields_start,
- pointer_fields_end,
- slot_callback);
- map_address += Map::kSize;
- }
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
- Address start,
- Address end,
- ObjectSlotCallback slot_callback) {
- Address map_aligned_start = MapStartAlign(start);
- Address map_aligned_end = MapEndAlign(end);
-
- ASSERT(map_aligned_start == start);
- ASSERT(map_aligned_end == end);
-
- FindPointersToNewSpaceInMaps(map_aligned_start,
- map_aligned_end,
- slot_callback);
-}
-
-
-// This function iterates over all the pointers in a paged space in the heap,
-// looking for pointers into new space. Within the pages there may be dead
-// objects that have not been overwritten by free spaces or fillers because of
-// lazy sweeping. These dead objects may not contain pointers to new space.
-// The garbage areas that have been swept properly (these will normally be the
-// large ones) will be marked with free space and filler map words. In
-// addition any area that has never been used at all for object allocation must
-// be marked with a free space or filler. Because the free space and filler
-// maps do not move we can always recognize these even after a compaction.
-// Normal objects like FixedArrays and JSObjects should not contain references
-// to these maps. The special garbage section (see comment in spaces.h) is
-// skipped since it can contain absolutely anything. Any objects that are
-// allocated during iteration may or may not be visited by the iteration, but
-// they will not be partially visited.
-void StoreBuffer::FindPointersToNewSpaceOnPage(
- PagedSpace* space,
- Page* page,
- RegionCallback region_callback,
- ObjectSlotCallback slot_callback) {
- Address visitable_start = page->ObjectAreaStart();
- Address end_of_page = page->ObjectAreaEnd();
-
- Address visitable_end = visitable_start;
-
- Object* free_space_map = heap_->free_space_map();
- Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
-
- while (visitable_end < end_of_page) {
- Object* o = *reinterpret_cast<Object**>(visitable_end);
- // Skip fillers but not things that look like fillers in the special
- // garbage section which can contain anything.
- if (o == free_space_map ||
- o == two_pointer_filler_map ||
- (visitable_end == space->top() && visitable_end != space->limit())) {
- if (visitable_start != visitable_end) {
- // After calling this the special garbage section may have moved.
- (this->*region_callback)(visitable_start,
- visitable_end,
- slot_callback);
- if (visitable_end >= space->top() && visitable_end < space->limit()) {
- visitable_end = space->limit();
- visitable_start = visitable_end;
- continue;
- }
- }
- if (visitable_end == space->top() && visitable_end != space->limit()) {
- visitable_start = visitable_end = space->limit();
- } else {
- // At this point we are either at the start of a filler or we are at
- // the point where the space->top() used to be before the
- // visit_pointer_region call above. Either way we can skip the
- // object at the current spot: We don't promise to visit objects
- // allocated during heap traversal, and if space->top() moved then it
- // must be because an object was allocated at this point.
- visitable_start =
- visitable_end + HeapObject::FromAddress(visitable_end)->Size();
- visitable_end = visitable_start;
- }
- } else {
- ASSERT(o != free_space_map);
- ASSERT(o != two_pointer_filler_map);
- ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
- visitable_end += kPointerSize;
- }
- }
- ASSERT(visitable_end == end_of_page);
- if (visitable_start != visitable_end) {
- (this->*region_callback)(visitable_start,
- visitable_end,
- slot_callback);
- }
-}
-
-
-void StoreBuffer::IteratePointersInStoreBuffer(
- ObjectSlotCallback slot_callback) {
- Address* limit = old_top_;
- old_top_ = old_start_;
- {
- DontMoveStoreBufferEntriesScope scope(this);
- for (Address* current = old_start_; current < limit; current++) {
-#ifdef DEBUG
- Address* saved_top = old_top_;
-#endif
- Object** slot = reinterpret_cast<Object**>(*current);
- Object* object = *slot;
- if (heap_->InFromSpace(object)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
- slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
- if (heap_->InNewSpace(*slot)) {
- EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
- }
- }
- ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
- }
- }
-}
-
-
-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
- // We do not sort or remove duplicated entries from the store buffer because
- // we expect that callback will rebuild the store buffer thus removing
- // all duplicates and pointers to old space.
- bool some_pages_to_scan = PrepareForIteration();
-
- // TODO(gc): we want to skip slots on evacuation candidates
- // but we can't simply figure that out from slot address
- // because slot can belong to a large object.
- IteratePointersInStoreBuffer(slot_callback);
-
- // We are done scanning all the pointers that were in the store buffer, but
- // there may be some pages marked scan_on_scavenge that have pointers to new
- // space that are not in the store buffer. We must scan them now. As we
- // scan, the surviving pointers to new space will be added to the store
- // buffer. If there are still a lot of pointers to new space then we will
- // keep the scan_on_scavenge flag on the page and discard the pointers that
- // were added to the store buffer. If there are not many pointers to new
- // space left on the page we will keep the pointers in the store buffer and
- // remove the flag from the page.
- if (some_pages_to_scan) {
- if (callback_ != NULL) {
- (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
- }
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) {
- chunk->set_scan_on_scavenge(false);
- if (callback_ != NULL) {
- (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
- }
- if (chunk->owner() == heap_->lo_space()) {
- LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
- HeapObject* array = large_page->GetObject();
- ASSERT(array->IsFixedArray());
- Address start = array->address();
- Address end = start + array->Size();
- FindPointersToNewSpaceInRegion(start, end, slot_callback);
- } else {
- Page* page = reinterpret_cast<Page*>(chunk);
- PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
- FindPointersToNewSpaceOnPage(
- owner,
- page,
- (owner == heap_->map_space() ?
- &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
- &StoreBuffer::FindPointersToNewSpaceInRegion),
- slot_callback);
- }
- }
- }
- if (callback_ != NULL) {
- (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
- }
- }
-}
-
-
-void StoreBuffer::Compact() {
- Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-
- if (top == start_) return;
-
- // There's no check of the limit in the loop below so we check here for
- // the worst case (compaction doesn't eliminate any pointers).
- ASSERT(top <= limit_);
- heap_->public_set_store_buffer_top(start_);
- if (top - start_ > old_limit_ - old_top_) {
- HandleFullness();
- }
- ASSERT(may_move_store_buffer_entries_);
- // Goes through the addresses in the store buffer attempting to remove
- // duplicates. In the interest of speed this is a lossy operation. Some
- // duplicates will remain. We have two hash tables with different hash
- // functions to reduce the number of unnecessary clashes.
- for (Address* current = start_; current < top; current++) {
- ASSERT(!heap_->cell_space()->Contains(*current));
- ASSERT(!heap_->code_space()->Contains(*current));
- ASSERT(!heap_->old_data_space()->Contains(*current));
- uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
- // Shift out the last bits including any tags.
- int_addr >>= kPointerSizeLog2;
- int hash1 =
- ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
- if (hash_map_1_[hash1] == int_addr) continue;
- int hash2 =
- ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
- hash2 ^= hash2 >> (kHashMapLengthLog2 * 2);
- if (hash_map_2_[hash2] == int_addr) continue;
- if (hash_map_1_[hash1] == 0) {
- hash_map_1_[hash1] = int_addr;
- } else if (hash_map_2_[hash2] == 0) {
- hash_map_2_[hash2] = int_addr;
- } else {
- // Rather than slowing down we just throw away some entries. This will
- // cause some duplicates to remain undetected.
- hash_map_1_[hash1] = int_addr;
- hash_map_2_[hash2] = 0;
- }
- old_buffer_is_sorted_ = false;
- old_buffer_is_filtered_ = false;
- *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
- ASSERT(old_top_ <= old_limit_);
- }
- heap_->isolate()->counters()->store_buffer_compactions()->Increment();
- CheckForFullBuffer();
-}
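
Compact() filters duplicates with two small direct-mapped tables instead of sorting, and deliberately accepts misses when both candidate slots are occupied. A self-contained sketch of the same lossy scheme; the table size and pointer shift mirror the constants above, but the second hash drops the extra folding step and the whole thing is illustrative, not the production code:

#include <cstdint>
#include <vector>

namespace {

const int kLog2 = 12;                    // 4096-entry tables, as above.
const int kLength = 1 << kLog2;
const int kPointerShift = 3;             // 64-bit pointers assumed.

// Keeps the first occurrence of (almost) every address. Because the full
// key is stored, nothing is wrongly dropped; the only lossiness is that
// some duplicates survive when both table slots are already taken.
std::vector<uintptr_t> LossyDedup(const std::vector<uintptr_t>& addresses) {
  std::vector<uintptr_t> table1(kLength, 0);
  std::vector<uintptr_t> table2(kLength, 0);
  std::vector<uintptr_t> kept;
  for (uintptr_t addr : addresses) {
    uintptr_t key = addr >> kPointerShift;             // shift out tag bits
    int h1 = static_cast<int>((key ^ (key >> kLog2)) & (kLength - 1));
    if (table1[h1] == key) continue;                   // seen before
    int h2 = static_cast<int>((key - (key >> kLog2)) & (kLength - 1));
    if (table2[h2] == key) continue;
    if (table1[h1] == 0) {
      table1[h1] = key;
    } else if (table2[h2] == 0) {
      table2[h2] = key;
    } else {
      // Both slots taken: evict instead of probing further, so later
      // duplicates of the evicted entries may slip through.
      table1[h1] = key;
      table2[h2] = 0;
    }
    kept.push_back(addr);
  }
  return kept;
}

}  // namespace
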
-
-
-void StoreBuffer::CheckForFullBuffer() {
- if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
- HandleFullness();
- }
-}
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/store-buffer.h b/deps/v8/src/store-buffer.h
deleted file mode 100644
index 61b97d9e64..0000000000
--- a/deps/v8/src/store-buffer.h
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STORE_BUFFER_H_
-#define V8_STORE_BUFFER_H_
-
-#include "allocation.h"
-#include "checks.h"
-#include "globals.h"
-#include "platform.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-class StoreBuffer;
-
-typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-
-typedef void (StoreBuffer::*RegionCallback)(
- Address start, Address end, ObjectSlotCallback slot_callback);
-
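
RegionCallback above is a pointer to a StoreBuffer member function; IteratePointersToNewSpace, earlier in this diff, picks one of two such members (the maps-region variant or the plain one) and calls it through ->*. A small self-contained example of that C++ idiom using a stand-in class (names are hypothetical):

#include <cstdio>

typedef unsigned char* Address;

class Scanner {
 public:
  typedef void (Scanner::*RegionCallback)(Address start, Address end);

  void ScanRegion(Address start, Address end, bool is_map_space) {
    // Pick the member function the same way IteratePointersToNewSpace picks
    // FindPointersToNewSpaceInMapsRegion vs. ...InRegion.
    RegionCallback callback = is_map_space ? &Scanner::ScanMapsRegion
                                           : &Scanner::ScanPlainRegion;
    (this->*callback)(start, end);  // pointer-to-member call syntax
  }

 private:
  void ScanPlainRegion(Address start, Address end) {
    std::printf("plain region: %td bytes\n", end - start);
  }
  void ScanMapsRegion(Address start, Address end) {
    std::printf("maps region: %td bytes\n", end - start);
  }
};

int main() {
  unsigned char block[64];
  Scanner scanner;
  scanner.ScanRegion(block, block + sizeof(block), /*is_map_space=*/false);
  scanner.ScanRegion(block, block + sizeof(block), /*is_map_space=*/true);
  return 0;
}
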
-// Used to implement the write barrier by collecting addresses of pointers
-// between spaces.
-class StoreBuffer {
- public:
- explicit StoreBuffer(Heap* heap);
-
- static void StoreBufferOverflow(Isolate* isolate);
-
- inline Address TopAddress();
-
- void Setup();
- void TearDown();
-
- // This is used by the mutator to enter addresses into the store buffer.
- inline void Mark(Address addr);
-
- // This is used by the heap traversal to enter the addresses into the store
- // buffer that should still be in the store buffer after GC. It enters
- // addresses directly into the old buffer because the GC starts by wiping the
- // old buffer and thereafter only visits each cell once so there is no need
- // to attempt to remove any dupes. During the first part of a GC we
- // are using the store buffer to access the old spaces and at the same time
- // we are rebuilding the store buffer using this function. There is, however
- // no issue of overwriting the buffer we are iterating over, because this
- // stage of the scavenge can only reduce the number of addresses in the store
- // buffer (some objects are promoted so pointers to them do not need to be in
- // the store buffer). The later parts of the GC scan the pages that are
- // exempt from the store buffer and process the promotion queue. These steps
- // can overflow this buffer. We check for this and on overflow we call the
- // callback set up with the StoreBufferRebuildScope object.
- inline void EnterDirectlyIntoStoreBuffer(Address addr);
-
- // Iterates over all pointers that go from old space to new space. It will
- // delete the store buffer as it starts so the callback should reenter
- // surviving old-to-new pointers into the store buffer to rebuild it.
- void IteratePointersToNewSpace(ObjectSlotCallback callback);
-
- static const int kStoreBufferOverflowBit = 1 << 16;
- static const int kStoreBufferSize = kStoreBufferOverflowBit;
- static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
- static const int kOldStoreBufferLength = kStoreBufferLength * 16;
- static const int kHashMapLengthLog2 = 12;
- static const int kHashMapLength = 1 << kHashMapLengthLog2;
-
- void Compact();
-
- void GCPrologue();
- void GCEpilogue();
-
- Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
- Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
- Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
- void SetTop(Object*** top) {
- ASSERT(top >= Start());
- ASSERT(top <= Limit());
- old_top_ = reinterpret_cast<Address*>(top);
- }
-
- bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
- bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
-
- // Goes through the store buffer removing pointers to things that have
- // been promoted. Rebuilds the store buffer completely if it overflowed.
- void SortUniq();
-
- void HandleFullness();
- void Verify();
-
- bool PrepareForIteration();
-
-#ifdef DEBUG
- void Clean();
- // Slow, for asserts only.
- bool CellIsInStoreBuffer(Address cell);
-#endif
-
- void Filter(int flag);
-
- private:
- Heap* heap_;
-
- // The store buffer is divided up into a new buffer that is constantly being
- // filled by mutator activity and an old buffer that is filled with the data
- // from the new buffer after compression.
- Address* start_;
- Address* limit_;
-
- Address* old_start_;
- Address* old_limit_;
- Address* old_top_;
-
- bool old_buffer_is_sorted_;
- bool old_buffer_is_filtered_;
- bool during_gc_;
- // The garbage collector iterates over many pointers to new space that are not
- // handled by the store buffer. This flag indicates whether the pointers
- // found by the callbacks should be added to the store buffer or not.
- bool store_buffer_rebuilding_enabled_;
- StoreBufferCallback callback_;
- bool may_move_store_buffer_entries_;
-
- VirtualMemory* virtual_memory_;
- uintptr_t* hash_map_1_;
- uintptr_t* hash_map_2_;
-
- void CheckForFullBuffer();
- void Uniq();
- void ZapHashTables();
- bool HashTablesAreZapped();
- void ExemptPopularPages(int prime_sample_step, int threshold);
-
- void FindPointersToNewSpaceInRegion(Address start,
- Address end,
- ObjectSlotCallback slot_callback);
-
- // For each region of pointers on a page in use from an old space call
- // visit_pointer_region callback.
- // If either visit_pointer_region or callback can cause an allocation
- // in old space and changes in allocation watermark then
- // can_preallocate_during_iteration should be set to true.
- void IteratePointersOnPage(
- PagedSpace* space,
- Page* page,
- RegionCallback region_callback,
- ObjectSlotCallback slot_callback);
-
- void FindPointersToNewSpaceInMaps(
- Address start,
- Address end,
- ObjectSlotCallback slot_callback);
-
- void FindPointersToNewSpaceInMapsRegion(
- Address start,
- Address end,
- ObjectSlotCallback slot_callback);
-
- void FindPointersToNewSpaceOnPage(
- PagedSpace* space,
- Page* page,
- RegionCallback region_callback,
- ObjectSlotCallback slot_callback);
-
- void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
-
-#ifdef DEBUG
- void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
- void VerifyPointers(LargeObjectSpace* space);
-#endif
-
- friend class StoreBufferRebuildScope;
- friend class DontMoveStoreBufferEntriesScope;
-};
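
Taken together, the comments in this header describe a small new buffer filled by the mutator's Mark(), a larger old buffer that receives compacted entries, EnterDirectlyIntoStoreBuffer() bypassing the new buffer during GC, and an overflow path when even the old buffer fills. A compilable miniature of that shape; the vector storage, sizes, and the trivial overflow handling are simplifications, not the real implementation:

#include <cstddef>
#include <vector>

typedef unsigned char* Address;

class TinyStoreBuffer {
 public:
  TinyStoreBuffer(size_t new_capacity, size_t old_capacity)
      : new_capacity_(new_capacity), old_capacity_(old_capacity) {
    new_buffer_.reserve(new_capacity_);
    old_buffer_.reserve(old_capacity_);
  }

  // Mutator-side write barrier entry point.
  void Mark(Address addr) {
    new_buffer_.push_back(addr);
    if (new_buffer_.size() == new_capacity_) Compact();
  }

  // GC-side: enter an address straight into the old buffer while rebuilding.
  void EnterDirectlyIntoStoreBuffer(Address addr) {
    old_buffer_.push_back(addr);
    if (old_buffer_.size() > old_capacity_) HandleFullness();
  }

 private:
  void Compact() {
    // The real code filters duplicates here (see the two-table sketch in
    // store-buffer.cc above); this version just moves everything across.
    for (Address addr : new_buffer_) old_buffer_.push_back(addr);
    new_buffer_.clear();
    if (old_buffer_.size() > old_capacity_) HandleFullness();
  }

  void HandleFullness() {
    // Stand-in for the real overflow path, which exempts popular pages by
    // switching them to scan-on-scavenge and dropping their entries.
    old_buffer_.clear();
  }

  size_t new_capacity_;
  size_t old_capacity_;
  std::vector<Address> new_buffer_;
  std::vector<Address> old_buffer_;
};
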
-
-
-class StoreBufferRebuildScope {
- public:
- explicit StoreBufferRebuildScope(Heap* heap,
- StoreBuffer* store_buffer,
- StoreBufferCallback callback)
- : heap_(heap),
- store_buffer_(store_buffer),
- stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
- stored_callback_(store_buffer->callback_) {
- store_buffer_->store_buffer_rebuilding_enabled_ = true;
- store_buffer_->callback_ = callback;
- (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
- }
-
- ~StoreBufferRebuildScope() {
- store_buffer_->callback_ = stored_callback_;
- store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
- store_buffer_->CheckForFullBuffer();
- }
-
- private:
- Heap* heap_;
- StoreBuffer* store_buffer_;
- bool stored_state_;
- StoreBufferCallback stored_callback_;
-};
-
-
-class DontMoveStoreBufferEntriesScope {
- public:
- explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
- : store_buffer_(store_buffer),
- stored_state_(store_buffer->may_move_store_buffer_entries_) {
- store_buffer_->may_move_store_buffer_entries_ = false;
- }
-
- ~DontMoveStoreBufferEntriesScope() {
- store_buffer_->may_move_store_buffer_entries_ = stored_state_;
- }
-
- private:
- StoreBuffer* store_buffer_;
- bool stored_state_;
-};
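
Both scope classes above are the usual RAII save-and-restore idiom: record a flag in the constructor, override it for the scope's dynamic extent, and restore it in the destructor so early exits cannot leave it in the wrong state. A generic version of the same pattern:

class ScopedFlagOverride {
 public:
  ScopedFlagOverride(bool* flag, bool value)
      : flag_(flag), stored_state_(*flag) {
    *flag_ = value;
  }
  ~ScopedFlagOverride() { *flag_ = stored_state_; }

 private:
  bool* flag_;
  bool stored_state_;
};

// Usage, mirroring DontMoveStoreBufferEntriesScope (variable name assumed):
//   {
//     ScopedFlagOverride no_moves(&may_move_store_buffer_entries, false);
//     ...iterate the buffer without compacting it...
//   }  // previous value restored here, even on early return
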
-
-} } // namespace v8::internal
-
-#endif // V8_STORE_BUFFER_H_
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index be955c8c06..297105d047 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -568,6 +568,7 @@ function StringSplit(separator, limit) {
}
var subject = TO_STRING_INLINE(this);
limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
+ if (limit === 0) return [];
// ECMA-262 says that if separator is undefined, the result should
// be an array of size 1 containing the entire string. SpiderMonkey
@@ -581,9 +582,6 @@ function StringSplit(separator, limit) {
var length = subject.length;
if (!IS_REGEXP(separator)) {
separator = TO_STRING_INLINE(separator);
-
- if (limit === 0) return [];
-
var separator_length = separator.length;
// If the separator string is empty then return the elements in the subject.
@@ -594,8 +592,6 @@ function StringSplit(separator, limit) {
return result;
}
- if (limit === 0) return [];
-
%_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
if (length === 0) {
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index be79c80085..c89c8f3339 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -27,6 +27,7 @@
#include <stdarg.h>
#include <math.h>
+#include <limits>
#include "globals.h"
#include "utils.h"
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 67451f2b88..55963303c4 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -55,15 +55,7 @@ void StubCache::Initialize(bool create_heap_objects) {
ASSERT(IsPowerOf2(kSecondaryTableSize));
if (create_heap_objects) {
HandleScope scope;
- Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
- for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = heap()->empty_string();
- primary_[i].value = empty;
- }
- for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = heap()->empty_string();
- secondary_[j].value = empty;
- }
+ Clear();
}
}
@@ -497,56 +489,38 @@ MaybeObject* StubCache::ComputeStoreField(String* name,
MaybeObject* StubCache::ComputeKeyedLoadOrStoreElement(
JSObject* receiver,
- KeyedIC::StubKind stub_kind,
+ bool is_store,
StrictModeFlag strict_mode) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(
- stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC
- : Code::KEYED_STORE_IC,
+ is_store ? Code::KEYED_STORE_IC :
+ Code::KEYED_LOAD_IC,
NORMAL,
strict_mode);
- String* name = NULL;
- switch (stub_kind) {
- case KeyedIC::LOAD:
- name = isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
- break;
- case KeyedIC::STORE_NO_TRANSITION:
- name = isolate()->heap()->KeyedStoreElementMonomorphic_symbol();
- break;
- default:
- UNREACHABLE();
- break;
- }
+ String* name = is_store
+ ? isolate()->heap()->KeyedStoreElementMonomorphic_symbol()
+ : isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
Object* maybe_code = receiver->map()->FindInCodeCache(name, flags);
if (!maybe_code->IsUndefined()) return Code::cast(maybe_code);
- Map* receiver_map = receiver->map();
MaybeObject* maybe_new_code = NULL;
- switch (stub_kind) {
- case KeyedIC::LOAD: {
- KeyedLoadStubCompiler compiler;
- maybe_new_code = compiler.CompileLoadElement(receiver_map);
- break;
- }
- case KeyedIC::STORE_NO_TRANSITION: {
- KeyedStoreStubCompiler compiler(strict_mode);
- maybe_new_code = compiler.CompileStoreElement(receiver_map);
- break;
- }
- default:
- UNREACHABLE();
- break;
+ Map* receiver_map = receiver->map();
+ if (is_store) {
+ KeyedStoreStubCompiler compiler(strict_mode);
+ maybe_new_code = compiler.CompileStoreElement(receiver_map);
+ } else {
+ KeyedLoadStubCompiler compiler;
+ maybe_new_code = compiler.CompileLoadElement(receiver_map);
}
- Code* code = NULL;
+ Code* code;
if (!maybe_new_code->To(&code)) return maybe_new_code;
-
- if (stub_kind == KeyedIC::LOAD) {
+ if (is_store) {
PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
+ CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
Code::cast(code), 0));
} else {
PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
Code::cast(code), 0));
}
ASSERT(code->IsCode());
@@ -1125,14 +1099,15 @@ MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(
void StubCache::Clear() {
- Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
for (int i = 0; i < kPrimaryTableSize; i++) {
primary_[i].key = heap()->empty_string();
- primary_[i].value = empty;
+ primary_[i].value = isolate_->builtins()->builtin(
+ Builtins::kIllegal);
}
for (int j = 0; j < kSecondaryTableSize; j++) {
secondary_[j].key = heap()->empty_string();
- secondary_[j].value = empty;
+ secondary_[j].value = isolate_->builtins()->builtin(
+ Builtins::kIllegal);
}
}
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index d9ec88f514..18c157b165 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -30,7 +30,6 @@
#include "allocation.h"
#include "arguments.h"
-#include "ic-inl.h"
#include "macro-assembler.h"
#include "objects.h"
#include "zone-inl.h"
@@ -188,7 +187,7 @@ class StubCache {
MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreElement(
JSObject* receiver,
- KeyedIC::StubKind stub_kind,
+ bool is_store,
StrictModeFlag strict_mode);
// ---
@@ -641,7 +640,7 @@ class KeyedLoadStubCompiler: public StubCompiler {
MUST_USE_RESULT MaybeObject* CompileLoadElement(Map* receiver_map);
- MUST_USE_RESULT MaybeObject* CompileLoadPolymorphic(
+ MUST_USE_RESULT MaybeObject* CompileLoadMegamorphic(
MapList* receiver_maps,
CodeList* handler_ics);
@@ -700,14 +699,12 @@ class KeyedStoreStubCompiler: public StubCompiler {
MUST_USE_RESULT MaybeObject* CompileStoreElement(Map* receiver_map);
- MUST_USE_RESULT MaybeObject* CompileStorePolymorphic(
+ MUST_USE_RESULT MaybeObject* CompileStoreMegamorphic(
MapList* receiver_maps,
- CodeList* handler_stubs,
- MapList* transitioned_maps);
+ CodeList* handler_ics);
static void GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array,
- ElementsKind element_kind);
+ bool is_js_array);
static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
bool is_js_array);
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index de4972dd73..eb825c1a74 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -216,10 +216,6 @@ class Token {
return op == LT || op == LTE || op == GT || op == GTE;
}
- static bool IsEqualityOp(Value op) {
- return op == EQ || op == EQ_STRICT;
- }
-
static Value NegateCompareOp(Value op) {
ASSERT(IsCompareOp(op));
switch (op) {
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index a4b16f4f30..c64368e599 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -60,10 +60,8 @@ TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
- Handle<Context> global_context,
- Isolate* isolate) {
+ Handle<Context> global_context) {
global_context_ = global_context;
- isolate_ = isolate;
BuildDictionary(code);
ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
}
@@ -73,12 +71,12 @@ Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
int entry = dictionary_->FindEntry(ast_id);
return entry != NumberDictionary::kNotFound
? Handle<Object>(dictionary_->ValueAt(entry))
- : Handle<Object>::cast(isolate_->factory()->undefined_value());
+ : Isolate::Current()->factory()->undefined_value();
}
bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code(GetInfo(expr->id()));
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
@@ -92,10 +90,10 @@ bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code(GetInfo(expr->id()));
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Builtins* builtins = isolate_->builtins();
+ Builtins* builtins = Isolate::Current()->builtins();
return code->is_keyed_load_stub() &&
*code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) &&
code->ic_state() == MEGAMORPHIC;
@@ -105,7 +103,7 @@ bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code(GetInfo(expr->id()));
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
@@ -118,10 +116,10 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code(GetInfo(expr->id()));
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Builtins* builtins = isolate_->builtins();
+ Builtins* builtins = Isolate::Current()->builtins();
return code->is_keyed_store_stub() &&
*code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
*code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
@@ -133,13 +131,13 @@ bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
Handle<Object> value = GetInfo(expr->id());
- return value->IsMap() || value->IsSmi() || value->IsJSFunction();
+ return value->IsMap() || value->IsSmi();
}
Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
ASSERT(LoadIsMonomorphicNormal(expr));
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code(GetInfo(expr->id()));
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
Map* first_map = code->FindFirstMap();
@@ -152,7 +150,7 @@ Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
ASSERT(StoreIsMonomorphicNormal(expr));
- Handle<Object> map_or_code = GetInfo(expr->id());
+ Handle<Object> map_or_code(GetInfo(expr->id()));
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
return Handle<Map>(code->FindFirstMap());
@@ -205,7 +203,6 @@ CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
return check;
}
-
Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
CheckType check) {
JSFunction* function = NULL;
@@ -228,14 +225,9 @@ Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
- return Handle<JSFunction>::cast(GetInfo(expr->id()));
-}
-
-
bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
return *GetInfo(expr->id()) ==
- isolate_->builtins()->builtin(id);
+ Isolate::Current()->builtins()->builtin(id);
}
@@ -405,11 +397,11 @@ void TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id,
Handle<String> name,
Code::Flags flags,
SmallMapList* types) {
+ Isolate* isolate = Isolate::Current();
Handle<Object> object = GetInfo(ast_id);
if (object->IsUndefined() || object->IsSmi()) return;
- if (*object ==
- isolate_->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
+ if (*object == isolate->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
// TODO(fschneider): We could collect the maps and signal that
// we need a generic store (or load) here.
ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
@@ -418,7 +410,7 @@ void TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id,
} else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
types->Reserve(4);
ASSERT(object->IsCode());
- isolate_->stub_cache()->CollectMatchingMaps(types, *name, flags);
+ isolate->stub_cache()->CollectMatchingMaps(types, *name, flags);
}
}
@@ -496,16 +488,14 @@ void TypeFeedbackOracle::RelocateRelocInfos(ZoneList<RelocInfo>* infos,
void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
for (int i = 0; i < infos->length(); i++) {
- Address target_address = (*infos)[i].target_address();
unsigned ast_id = static_cast<unsigned>((*infos)[i].data());
- ProcessTargetAt(target_address, ast_id);
+ Code* target = Code::GetCodeFromTargetAddress((*infos)[i].target_address());
+ ProcessTarget(ast_id, target);
}
}
-void TypeFeedbackOracle::ProcessTargetAt(Address target_address,
- unsigned ast_id) {
- Code* target = Code::GetCodeFromTargetAddress(target_address);
+void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) {
switch (target->kind()) {
case Code::LOAD_IC:
case Code::STORE_IC:
@@ -514,7 +504,7 @@ void TypeFeedbackOracle::ProcessTargetAt(Address target_address,
if (target->ic_state() == MONOMORPHIC) {
if (target->kind() == Code::CALL_IC &&
target->check_type() != RECEIVER_MAP_CHECK) {
- SetInfo(ast_id, Smi::FromInt(target->check_type()));
+ SetInfo(ast_id, Smi::FromInt(target->check_type()));
} else {
Object* map = target->FindFirstMap();
SetInfo(ast_id, map == NULL ? static_cast<Object*>(target) : map);
@@ -539,16 +529,6 @@ void TypeFeedbackOracle::ProcessTargetAt(Address target_address,
SetInfo(ast_id, target);
break;
- case Code::STUB:
- if (target->major_key() == CodeStub::CallFunction &&
- target->has_function_cache()) {
- Object* value = CallFunctionStub::GetCachedValue(target_address);
- if (value->IsJSFunction()) {
- SetInfo(ast_id, value);
- }
- }
- break;
-
default:
break;
}
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 0ba10aaa5f..448e4c94e7 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -216,9 +216,7 @@ class UnaryOperation;
class TypeFeedbackOracle BASE_EMBEDDED {
public:
- TypeFeedbackOracle(Handle<Code> code,
- Handle<Context> global_context,
- Isolate* isolate);
+ TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
bool LoadIsMonomorphicNormal(Property* expr);
bool LoadIsMegamorphicWithTypeInfo(Property* expr);
@@ -245,8 +243,6 @@ class TypeFeedbackOracle BASE_EMBEDDED {
CheckType GetCallCheckType(Call* expr);
Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
- Handle<JSFunction> GetCallTarget(Call* expr);
-
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
@@ -277,14 +273,13 @@ class TypeFeedbackOracle BASE_EMBEDDED {
byte* old_start,
byte* new_start);
void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
- void ProcessTargetAt(Address target_address, unsigned ast_id);
+ void ProcessTarget(unsigned ast_id, Code* target);
// Returns an element from the backing store. Returns undefined if
// there is no information.
Handle<Object> GetInfo(unsigned ast_id);
Handle<Context> global_context_;
- Isolate* isolate_;
Handle<NumberDictionary> dictionary_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index 1656664a3d..c910d756b4 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -111,59 +111,47 @@ function URIDecodeOctets(octets, result, index) {
var o1 = octets[1];
if (o0 < 0xe0) {
var a = o0 & 0x1f;
- if ((o1 < 0x80) || (o1 > 0xbf)) {
+ if ((o1 < 0x80) || (o1 > 0xbf))
throw new $URIError("URI malformed");
- }
var b = o1 & 0x3f;
value = (a << 6) + b;
- if (value < 0x80 || value > 0x7ff) {
+ if (value < 0x80 || value > 0x7ff)
throw new $URIError("URI malformed");
- }
} else {
var o2 = octets[2];
if (o0 < 0xf0) {
var a = o0 & 0x0f;
- if ((o1 < 0x80) || (o1 > 0xbf)) {
+ if ((o1 < 0x80) || (o1 > 0xbf))
throw new $URIError("URI malformed");
- }
var b = o1 & 0x3f;
- if ((o2 < 0x80) || (o2 > 0xbf)) {
+ if ((o2 < 0x80) || (o2 > 0xbf))
throw new $URIError("URI malformed");
- }
var c = o2 & 0x3f;
value = (a << 12) + (b << 6) + c;
- if ((value < 0x800) || (value > 0xffff)) {
+ if ((value < 0x800) || (value > 0xffff))
throw new $URIError("URI malformed");
- }
} else {
var o3 = octets[3];
if (o0 < 0xf8) {
var a = (o0 & 0x07);
- if ((o1 < 0x80) || (o1 > 0xbf)) {
+ if ((o1 < 0x80) || (o1 > 0xbf))
throw new $URIError("URI malformed");
- }
var b = (o1 & 0x3f);
- if ((o2 < 0x80) || (o2 > 0xbf)) {
+ if ((o2 < 0x80) || (o2 > 0xbf))
throw new $URIError("URI malformed");
- }
var c = (o2 & 0x3f);
- if ((o3 < 0x80) || (o3 > 0xbf)) {
+ if ((o3 < 0x80) || (o3 > 0xbf))
throw new $URIError("URI malformed");
- }
var d = (o3 & 0x3f);
value = (a << 18) + (b << 12) + (c << 6) + d;
- if ((value < 0x10000) || (value > 0x10ffff)) {
+ if ((value < 0x10000) || (value > 0x10ffff))
throw new $URIError("URI malformed");
- }
} else {
throw new $URIError("URI malformed");
}
}
}
}
- if (0xD800 <= value && value <= 0xDFFF) {
- throw new $URIError("URI malformed");
- }
if (value < 0x10000) {
result[index++] = value;
return index;
@@ -226,8 +214,7 @@ function Decode(uri, reserved) {
if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
for (var i = 1; i < n; i++) {
if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
- octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
- uri.charCodeAt(++k));
+ octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
}
index = URIDecodeOctets(octets, result, index);
} else {
@@ -379,9 +366,7 @@ function CharCodeToHex4Str(cc) {
function IsValidHex(s) {
for (var i = 0; i < s.length; ++i) {
var cc = s.charCodeAt(i);
- if ((48 <= cc && cc <= 57) ||
- (65 <= cc && cc <= 70) ||
- (97 <= cc && cc <= 102)) {
+ if ((48 <= cc && cc <= 57) || (65 <= cc && cc <= 70) || (97 <= cc && cc <= 102)) {
// '0'..'9', 'A'..'F' and 'a' .. 'f'.
} else {
return false;
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index a523118a39..26c522b89f 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -113,7 +113,7 @@ static inline T AddressFrom(intptr_t x) {
// Return the largest multiple of m which is <= x.
template <typename T>
-static inline T RoundDown(T x, intptr_t m) {
+static inline T RoundDown(T x, int m) {
ASSERT(IsPowerOf2(m));
return AddressFrom<T>(OffsetFrom(x) & -m);
}
@@ -121,8 +121,8 @@ static inline T RoundDown(T x, intptr_t m) {
// Return the smallest multiple of m which is >= x.
template <typename T>
-static inline T RoundUp(T x, intptr_t m) {
- return RoundDown<T>(static_cast<T>(x + m - 1), m);
+static inline T RoundUp(T x, int m) {
+ return RoundDown(x + m - 1, m);
}
@@ -159,15 +159,9 @@ static inline uint32_t RoundUpToPowerOf2(uint32_t x) {
}
-static inline uint32_t RoundDownToPowerOf2(uint32_t x) {
- uint32_t rounded_up = RoundUpToPowerOf2(x);
- if (rounded_up > x) return rounded_up >> 1;
- return rounded_up;
-}
-
-template <typename T, typename U>
-static inline bool IsAligned(T value, U alignment) {
+template <typename T>
+static inline bool IsAligned(T value, T alignment) {
ASSERT(IsPowerOf2(alignment));
return (value & (alignment - 1)) == 0;
}
@@ -176,7 +170,7 @@ static inline bool IsAligned(T value, U alignment) {
// Returns true if (addr + offset) is aligned.
static inline bool IsAddressAligned(Address addr,
intptr_t alignment,
- int offset = 0) {
+ int offset) {
intptr_t offs = OffsetFrom(addr + offset);
return IsAligned(offs, alignment);
}
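
The utils.h helpers in these hunks all assume a power-of-two m: RoundDown clears the low bits, RoundUp rounds down x + m - 1, and IsAligned tests the low bits. A worked example on plain integers, using the 8K page size (1 << 13) from the v8globals.h hunk further down as the power of two; the originals operate on Address via OffsetFrom/AddressFrom, but the arithmetic is identical:

#include <cassert>
#include <cstdint>

static inline intptr_t RoundDown(intptr_t x, intptr_t m) {
  return x & -m;                       // clear the low log2(m) bits
}
static inline intptr_t RoundUp(intptr_t x, intptr_t m) {
  return RoundDown(x + m - 1, m);      // bump into the next multiple first
}
static inline bool IsAligned(intptr_t value, intptr_t alignment) {
  return (value & (alignment - 1)) == 0;
}

int main() {
  assert(RoundDown(8193, 8192) == 8192);   // largest multiple of 8K <= x
  assert(RoundUp(8193, 8192) == 16384);    // smallest multiple of 8K >= x
  assert(RoundUp(8192, 8192) == 8192);     // already aligned: unchanged
  assert(IsAligned(16384, 8192) && !IsAligned(8193, 8192));
  return 0;
}
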
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 47341e72c5..2de830300d 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -107,10 +107,7 @@ namespace internal {
SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
/* Number of code objects found from pc. */ \
SC(pc_to_code, V8.PcToCode) \
- SC(pc_to_code_cached, V8.PcToCodeCached) \
- /* The store-buffer implementation of the write barrier. */ \
- SC(store_buffer_compactions, V8.StoreBufferCompactions) \
- SC(store_buffer_overflows, V8.StoreBufferOverflows)
+ SC(pc_to_code_cached, V8.PcToCodeCached)
#define STATS_COUNTER_LIST_2(SC) \
@@ -129,6 +126,10 @@ namespace internal {
V8.GCCompactorCausedByWeakHandles) \
SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
+ SC(map_to_fast_elements, V8.MapToFastElements) \
+ SC(map_to_fast_double_elements, V8.MapToFastDoubleElements) \
+ SC(map_to_slow_elements, V8.MapToSlowElements) \
+ SC(map_to_external_array_elements, V8.MapToExternalArrayElements) \
/* How is the generic keyed-load stub used? */ \
SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index a04114e701..1e9b5dc142 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -38,7 +38,6 @@
#include "log.h"
#include "runtime-profiler.h"
#include "serialize.h"
-#include "store-buffer.h"
namespace v8 {
namespace internal {
@@ -57,15 +56,6 @@ static EntropySource entropy_source;
bool V8::Initialize(Deserializer* des) {
- // Setting --harmony implies all other harmony flags.
- // TODO(rossberg): Is there a better place to put this?
- if (FLAG_harmony) {
- FLAG_harmony_typeof = true;
- FLAG_harmony_scoping = true;
- FLAG_harmony_proxies = true;
- FLAG_harmony_weakmaps = true;
- }
-
InitializeOncePerProcess();
// The current thread may not yet had entered an isolate to run.
@@ -225,12 +215,6 @@ void V8::InitializeOncePerProcess() {
FLAG_peephole_optimization = !use_crankshaft_;
ElementsAccessor::InitializeOncePerProcess();
-
- if (FLAG_stress_compaction) {
- FLAG_force_marking_deque_overflows = true;
- FLAG_gc_global = true;
- FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
- }
}
} } // namespace v8::internal
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 2e039d429f..e565ca5ae9 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -60,11 +60,10 @@
#include "objects-inl.h"
#include "spaces-inl.h"
#include "heap-inl.h"
-#include "incremental-marking-inl.h"
-#include "mark-compact-inl.h"
#include "log-inl.h"
#include "cpu-profiler-inl.h"
#include "handles-inl.h"
+#include "isolate-inl.h"
namespace v8 {
namespace internal {
@@ -125,15 +124,6 @@ class V8 : public AllStatic {
static bool use_crankshaft_;
};
-
-// JavaScript defines two kinds of 'nil'.
-enum NilValue { kNullValue, kUndefinedValue };
-
-
-// JavaScript defines two kinds of equality.
-enum EqualityKind { kStrictEquality, kNonStrictEquality };
-
-
} } // namespace v8::internal
namespace i = v8::internal;
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 09d26d2f11..eb5c49d751 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -79,20 +79,18 @@ const Address kFromSpaceZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
-const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
const uint32_t kSlotsZapValue = 0xbeefdeef;
const uint32_t kDebugZapValue = 0xbadbaddb;
-const uint32_t kFreeListZapValue = 0xfeed1eaf;
#endif
-// Number of bits to represent the page size for paged spaces. The value of 20
-// gives 1Mb bytes per page.
-const int kPageSizeBits = 20;
+// Number of bits to represent the page size for paged spaces. The value of 13
+// gives 8K bytes per page.
+const int kPageSizeBits = 13;
// On Intel architecture, cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far this constant is
@@ -100,6 +98,10 @@ const int kPageSizeBits = 20;
const int kProcessorCacheLineSize = 64;
// Constants relevant to double precision floating point numbers.
+
+// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
+// other bits set.
+const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
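
As the comment says, a quiet NaN has bits 51 through 62 set, so kQuietNaNMask selects exactly those bits and kQuietNaNHighBitsMask is the same mask restricted to the upper 32-bit word. A quick check against the platform's default quiet NaN, assuming the usual IEEE 754 canonical encoding (0x7ff8'0000'0000'0000):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

int main() {
  const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
  const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);

  double qnan = std::numeric_limits<double>::quiet_NaN();
  uint64_t bits;
  std::memcpy(&bits, &qnan, sizeof(bits));   // bit-cast the double

  // Exponent bits 52..62 plus the quiet bit 51 are all set...
  assert((bits & kQuietNaNMask) == kQuietNaNMask);
  // ...and the same test works on just the upper 32-bit word.
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  assert((high & kQuietNaNHighBitsMask) == kQuietNaNHighBitsMask);
  return 0;
}
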
@@ -129,7 +131,6 @@ class FixedArray;
class FunctionEntry;
class FunctionLiteral;
class FunctionTemplateInfo;
-class MemoryChunk;
class NumberDictionary;
class StringDictionary;
template <typename T> class Handle;
@@ -253,6 +254,12 @@ struct CodeDesc {
};
+// Callback function on object slots, used for iterating heap object slots in
+// HeapObjects, global pointers to heap objects, etc. The callback allows the
+// callback function to change the value of the slot.
+typedef void (*ObjectSlotCallback)(HeapObject** pointer);
+
+
// Callback function used for iterating objects in heap spaces,
// for example, scanning heap objects.
typedef int (*HeapObjectCallback)(HeapObject* obj);
@@ -299,9 +306,7 @@ enum CallFunctionFlags {
NO_CALL_FUNCTION_FLAGS = 0,
// Receiver might implicitly be the global objects. If it is, the
// hole is passed to the call function stub.
- RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0,
- // The call target is cached in the instruction stream.
- RECORD_CALL_TARGET = 1 << 1
+ RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0
};
@@ -311,19 +316,6 @@ enum InlineCacheHolderFlag {
};
-// The Store Buffer (GC).
-typedef enum {
- kStoreBufferFullEvent,
- kStoreBufferStartScanningPagesEvent,
- kStoreBufferScanningPageEvent
-} StoreBufferEvent;
-
-
-typedef void (*StoreBufferCallback)(Heap* heap,
- MemoryChunk* page,
- StoreBufferEvent event);
-
-
// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
@@ -496,7 +488,7 @@ enum StrictModeFlag {
// Used to specify if a macro instruction must perform a smi check on tagged
// values.
enum SmiCheckType {
- DONT_DO_SMI_CHECK,
+ DONT_DO_SMI_CHECK = 0,
DO_SMI_CHECK
};
@@ -504,7 +496,7 @@ enum SmiCheckType {
// Used to specify whether a receiver is implicitly or explicitly
// provided to a call.
enum CallKind {
- CALL_AS_METHOD,
+ CALL_AS_METHOD = 0,
CALL_AS_FUNCTION
};
@@ -518,35 +510,6 @@ const uint64_t kHoleNanInt64 =
const uint64_t kLastNonNaNInt64 =
(static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
-
-enum VariableMode {
- // User declared variables:
- VAR, // declared via 'var', and 'function' declarations
-
- CONST, // declared via 'const' declarations
-
- LET, // declared via 'let' declarations
-
- // Variables introduced by the compiler:
- DYNAMIC, // always require dynamic lookup (we don't know
- // the declaration)
-
- DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
- // variable is global unless it has been shadowed
- // by an eval-introduced variable
-
- DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the
- // variable is local and where it is unless it
- // has been shadowed by an eval-introduced
- // variable
-
- INTERNAL, // like VAR, but not user-visible (may or may not
- // be in a context)
-
- TEMPORARY // temporary variables (not user-visible), never
- // in a context
-};
-
} } // namespace v8::internal
#endif // V8_V8GLOBALS_H_
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index dee3032378..588bdb21bb 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -193,14 +193,13 @@ function GlobalEval(x) {
function SetUpGlobal() {
%CheckIsBootstrapping();
// ECMA 262 - 15.1.1.1.
- %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE);
// ECMA-262 - 15.1.1.2.
- %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE);
// ECMA-262 - 15.1.1.3.
- %SetProperty(global, "undefined", void 0,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE);
// Set up non-enumerable function on the global object.
InstallFunctions(global, DONT_ENUM, $Array(
@@ -690,7 +689,12 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
// ES5 8.12.9.
-function DefineObjectProperty(obj, p, desc, should_throw) {
+function DefineOwnProperty(obj, p, desc, should_throw) {
+ if (%IsJSProxy(obj)) {
+ var attributes = FromGenericPropertyDescriptor(desc);
+ return DefineProxyProperty(obj, p, attributes, should_throw);
+ }
+
var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
// A false value here means that access checks failed.
if (current_or_access === false) return void 0;
@@ -854,63 +858,6 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
}
-// ES5 section 15.4.5.1.
-function DefineArrayProperty(obj, p, desc, should_throw) {
- var length_desc = GetOwnProperty(obj, "length");
- var length = length_desc.getValue();
-
- // Step 3 - Special handling for the length property.
- if (p == "length") {
- if (!desc.hasValue()) {
- return DefineObjectProperty(obj, "length", desc, should_throw);
- }
- var new_length = ToUint32(desc.getValue());
- if (new_length != ToNumber(desc.getValue())) {
- throw new $RangeError('defineProperty() array length out of range');
- }
- // TODO(1756): There still are some uncovered corner cases left on how to
- // handle changes to the length property of arrays.
- return DefineObjectProperty(obj, "length", desc, should_throw);
- }
-
- // Step 4 - Special handling for array index.
- var index = ToUint32(p);
- if (index == ToNumber(p) && index != 4294967295) {
- if ((index >= length && !length_desc.isWritable()) ||
- !DefineObjectProperty(obj, p, desc, true)) {
- if (should_throw) {
- throw MakeTypeError("define_disallowed", [p]);
- } else {
- return;
- }
- }
- if (index >= length) {
- // TODO(mstarzinger): We should actually set the value of the property
- // descriptor here and pass it to DefineObjectProperty(). Take a look at
- // ES5 section 15.4.5.1, step 4.e.i and 4.e.ii for details.
- obj.length = index + 1;
- }
- return true;
- }
-
- // Step 5 - Fallback to default implementation.
- return DefineObjectProperty(obj, p, desc, should_throw);
-}
-
-
-// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
-function DefineOwnProperty(obj, p, desc, should_throw) {
- if (%IsJSProxy(obj)) {
- var attributes = FromGenericPropertyDescriptor(desc);
- return DefineProxyProperty(obj, p, attributes, should_throw);
- } else if (IS_ARRAY(obj)) {
- return DefineArrayProperty(obj, p, desc, should_throw);
- } else {
- return DefineObjectProperty(obj, p, desc, should_throw);
- }
-}
-
-
// ES5 section 15.2.3.2.
function ObjectGetPrototypeOf(obj) {
if (!IS_SPEC_OBJECT(obj))
@@ -1095,21 +1042,12 @@ function ProxyFix(obj) {
throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
}
- if (%IsJSFunctionProxy(obj)) {
+ if (IS_SPEC_FUNCTION(obj)) {
var callTrap = %GetCallTrap(obj);
var constructTrap = %GetConstructTrap(obj);
var code = DelegateCallAndConstruct(callTrap, constructTrap);
%Fix(obj); // becomes a regular function
%SetCode(obj, code);
- // TODO(rossberg): What about length and other properties? Not specified.
- // We just put in some half-reasonable defaults for now.
- var prototype = new $Object();
- $Object.defineProperty(prototype, "constructor",
- {value: obj, writable: true, enumerable: false, configrable: true});
- $Object.defineProperty(obj, "prototype",
- {value: prototype, writable: true, enumerable: false, configrable: false})
- $Object.defineProperty(obj, "length",
- {value: 0, writable: true, enumerable: false, configrable: false});
} else {
%Fix(obj);
}
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index c854f04124..aada521e4c 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -142,14 +142,8 @@ inline void CopyWords(T* dst, T* src, int num_words) {
}
-template <typename T, typename U>
-static inline void MemsetPointer(T** dest, U* value, int counter) {
-#ifdef DEBUG
- T* a = NULL;
- U* b = NULL;
- a = b; // Fake assignment to check assignability.
- USE(a);
-#endif // DEBUG
+template <typename T>
+static inline void MemsetPointer(T** dest, T* value, int counter) {
#if defined(V8_HOST_ARCH_IA32)
#define STOS "stosl"
#elif defined(V8_HOST_ARCH_X64)
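
The removed two-parameter MemsetPointer uses a dead assignment purely so the compiler verifies that a U* converts to a T* before the raw pointer value is blasted into the array; the single-parameter replacement gets the same guarantee by demanding the exact type. A compilable sketch of that debug-only check with hypothetical types (the plain loop stands in for the inline-assembly fast path, which is why the original needed the explicit check at all):

#include <cstddef>

template <typename T, typename U>
static inline void MemsetPointer(T** dest, U* value, size_t count) {
#ifdef DEBUG
  T* a = NULL;
  U* b = NULL;
  a = b;        // compiles only if U* implicitly converts to T*
  (void)a;
#endif
  for (size_t i = 0; i < count; i++) {
    dest[i] = value;   // portable stand-in for the stos-based fast path
  }
}

// Hypothetical use: filling a slot array with a shared sentinel object.
struct Object {};
struct Sentinel : Object {};

void Example() {
  Object* slots[8];
  Sentinel the_hole;
  MemsetPointer(slots, &the_hole, 8);   // Sentinel* -> Object* is allowed
}
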
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 076cdc0a48..971061b053 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -37,7 +37,7 @@ namespace internal {
// ----------------------------------------------------------------------------
// Implementation Variable.
-const char* Variable::Mode2String(VariableMode mode) {
+const char* Variable::Mode2String(Mode mode) {
switch (mode) {
case VAR: return "VAR";
case CONST: return "CONST";
@@ -55,7 +55,7 @@ const char* Variable::Mode2String(VariableMode mode) {
Variable::Variable(Scope* scope,
Handle<String> name,
- VariableMode mode,
+ Mode mode,
bool is_valid_LHS,
Kind kind)
: scope_(scope),
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index 612d8d33c4..56c8dabd37 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -40,6 +40,34 @@ namespace internal {
class Variable: public ZoneObject {
public:
+ enum Mode {
+ // User declared variables:
+ VAR, // declared via 'var', and 'function' declarations
+
+ CONST, // declared via 'const' declarations
+
+ LET, // declared via 'let' declarations
+
+ // Variables introduced by the compiler:
+ DYNAMIC, // always require dynamic lookup (we don't know
+ // the declaration)
+
+ DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
+ // variable is global unless it has been shadowed
+ // by an eval-introduced variable
+
+ DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the
+ // variable is local and where it is unless it
+ // has been shadowed by an eval-introduced
+ // variable
+
+ INTERNAL, // like VAR, but not user-visible (may or may not
+ // be in a context)
+
+ TEMPORARY // temporary variables (not user-visible), never
+ // in a context
+ };
+
enum Kind {
NORMAL,
THIS,
@@ -75,12 +103,12 @@ class Variable: public ZoneObject {
Variable(Scope* scope,
Handle<String> name,
- VariableMode mode,
+ Mode mode,
bool is_valid_lhs,
Kind kind);
// Printing support
- static const char* Mode2String(VariableMode mode);
+ static const char* Mode2String(Mode mode);
bool IsValidLeftHandSide() { return is_valid_LHS_; }
@@ -91,7 +119,7 @@ class Variable: public ZoneObject {
Scope* scope() const { return scope_; }
Handle<String> name() const { return name_; }
- VariableMode mode() const { return mode_; }
+ Mode mode() const { return mode_; }
bool is_accessed_from_inner_scope() const {
return is_accessed_from_inner_scope_;
}
@@ -149,7 +177,7 @@ class Variable: public ZoneObject {
private:
Scope* scope_;
Handle<String> name_;
- VariableMode mode_;
+ Mode mode_;
Kind kind_;
Location location_;
int index_;
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 30402266a1..da7c363a6c 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 7
-#define BUILD_NUMBER 0
-#define PATCH_LEVEL 0
+#define MINOR_VERSION 6
+#define BUILD_NUMBER 6
+#define PATCH_LEVEL 6
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/win32-headers.h b/deps/v8/src/win32-headers.h
index 0ee330668a..fca5c137ef 100644
--- a/deps/v8/src/win32-headers.h
+++ b/deps/v8/src/win32-headers.h
@@ -75,7 +75,6 @@
// makes it impossible to have them elsewhere.
#include <winsock2.h>
#include <ws2tcpip.h>
-#include <wspiapi.h>
#include <process.h> // for _beginthreadex()
#include <stdlib.h>
#endif // V8_WIN32_HEADERS_FULL
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 10f0b886da..8db54f0752 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -242,11 +242,6 @@ void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
if (IsCodeTarget(rmode_)) {
Assembler::set_target_address_at(pc_, target);
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- if (host() != NULL) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
} else {
Memory::Address_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
@@ -284,12 +279,8 @@ Address* RelocInfo::target_reference_address() {
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory::Object_at(pc_) = target;
+ *reinterpret_cast<Object**>(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
- if (host() != NULL && target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
}
@@ -315,12 +306,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
- if (host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
- }
}
@@ -359,11 +344,6 @@ void RelocInfo::set_call_address(Address target) {
target;
CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
sizeof(Address));
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
}
@@ -388,7 +368,7 @@ Object** RelocInfo::call_object_address() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitPointer(target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
@@ -416,7 +396,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
+ StaticVisitor::VisitPointer(heap, target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 1c4980ebc1..745fdaeb8f 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -47,7 +47,7 @@ uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
void CpuFeatures::Probe() {
- ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
+ ASSERT(!initialized_);
#ifdef DEBUG
initialized_ = true;
#endif
@@ -2983,7 +2983,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
return;
}
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(pc_, rmode, data);
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 0d870537ff..2e373faac5 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -215,12 +215,6 @@ struct XMMRegister {
return names[index];
}
- static XMMRegister from_code(int code) {
- ASSERT(code >= 0);
- ASSERT(code < kNumRegisters);
- XMMRegister r = { code };
- return r;
- }
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(XMMRegister reg) const { return code_ == reg.code_; }
int code() const {
@@ -741,10 +735,6 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_32(0x0, dst, src);
}
- void addl(const Operand& dst, Register src) {
- arithmetic_op_32(0x01, src, dst);
- }
-
void addq(Register dst, Register src) {
arithmetic_op(0x03, dst, src);
}
@@ -1404,14 +1394,13 @@ class Assembler : public AssemblerBase {
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
-
protected:
bool emit_debug_code() const { return emit_debug_code_; }
private:
byte* addr_at(int pos) { return buffer_ + pos; }
+ byte byte_at(int pos) { return buffer_[pos]; }
+ void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 79ddb1393e..db06909daa 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -79,12 +79,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- rdi: constructor function
// -----------------------------------
- Label slow, non_function_call;
+ Label non_function_call;
// Check that function is not a smi.
__ JumpIfSmi(rdi, &non_function_call);
// Check that function is a JSFunction.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
+ __ j(not_equal, &non_function_call);
// Jump to the function-specific construct stub.
__ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -94,19 +94,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// rdi: called object
// rax: number of arguments
- // rcx: object map
- Label do_call;
- __ bind(&slow);
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function_call);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
__ bind(&non_function_call);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
// Set expected number of arguments to zero (not changing rax).
__ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ SetCallKind(rcx, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -119,278 +110,272 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
- // Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
+ // Enter a construct frame.
+ __ EnterConstructFrame();
- // Store a smi-tagged arguments count on the stack.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
+ // Store a smi-tagged arguments count on the stack.
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax);
- // Push the function to invoke on the stack.
- __ push(rdi);
+ // Push the function to invoke on the stack.
+ __ push(rdi);
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
+ // Try to allocate the object without transitioning into C code. If any of the
+ // preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ movq(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
- __ j(not_equal, &rt_call);
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ movq(kScratchRegister, debug_step_in_fp);
+ __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+ __ j(not_equal, &rt_call);
#endif
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &rt_call);
- // rdi: constructor
- // rax: initial map (if proven valid below)
- __ CmpObjectType(rax, MAP_TYPE, rbx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // rdi: constructor
- // rax: initial map
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ decb(FieldOperand(rcx,
- SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
+ // Verified that the constructor is a JSFunction.
+ // Load the initial map and verify that it is in fact a map.
+ // rdi: constructor
+ __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rax, &rt_call);
+ // rdi: constructor
+ // rax: initial map (if proven valid below)
+ __ CmpObjectType(rax, MAP_TYPE, rbx);
+ __ j(not_equal, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see comments
+ // in Runtime_NewObject in runtime.cc). In which case the initial map's
+ // instance type would be JS_FUNCTION_TYPE.
+ // rdi: constructor
+ // rax: initial map
+ __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
+
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset));
+ __ j(not_zero, &allocate);
- __ push(rax);
- __ push(rdi);
+ __ push(rax);
+ __ push(rdi);
- __ push(rdi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ push(rdi); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
- __ pop(rdi);
- __ pop(rax);
+ __ pop(rdi);
+ __ pop(rax);
- __ bind(&allocate);
- }
+ __ bind(&allocate);
+ }
- // Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shl(rdi, Immediate(kPointerSizeLog2));
- // rdi: size of new object
- __ AllocateInNewSpace(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // rax: initial map
- // rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
- __ movq(Operand(rbx, JSObject::kMapOffset), rax);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
- // Set extra fields in the newly allocated object.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ // Now allocate the JSObject on the heap.
+ __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ shl(rdi, Immediate(kPointerSizeLog2));
+ // rdi: size of new object
+ __ AllocateInNewSpace(rdi,
+ rbx,
+ rdi,
+ no_reg,
+ &rt_call,
+ NO_ALLOCATION_FLAGS);
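+ // The map stores its instance size in words (one byte at
+ // Map::kInstanceSizeOffset); the movzxbq/shl pair above converts it into a
+ // byte count before the new-space bump allocation. In effect:
+ //   size_in_bytes = instance_size_in_words << kPointerSizeLog2;  // * 8 on x64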
+ // Allocated the JSObject; now initialize the fields.
+ // rax: initial map
+ // rbx: JSObject (not HeapObject tagged - the actual address).
+ // rdi: start of next object
+ __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+ __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+ __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+ // Set extra fields in the newly allocated object.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ { Label loop, entry;
+ // Choose a filler that allows the object to be truncated later.
if (count_constructions) {
- __ movzxbq(rsi,
- FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(rsi,
- Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
- // rsi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmpq(rsi, rdi);
- __ Assert(less_equal,
- "Unexpected number of pre-allocated property fields.");
- }
- __ InitializeFieldsWithFiller(rcx, rsi, rdx);
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
- }
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- // Calculate total properties described map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx,
- FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // rbx: JSObject
- // rdi: start of next object (will be start of FixedArray)
- // rdx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // rbx: JSObject
- // rdi: FixedArray
- // rdx: number of elements
- // rax: start of next object
- __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
- __ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
-
- // Initialize the fields to undefined.
- // rbx: JSObject
- // rdi: FixedArray
- // rax: start of next object
- // rdx: number of elements
- { Label loop, entry;
+ } else {
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(below, &loop);
}
+ __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(rcx, 0), rdx);
+ __ addq(rcx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(rcx, rdi);
+ __ j(less, &loop);
+ }
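+ // The loop above amounts to the following sketch (rbx is the untagged object
+ // start, rdi the untagged end, rdx the chosen filler):
+ //   for (addr = rbx + JSObject::kHeaderSize; addr < rdi; addr += kPointerSize)
+ //     *addr = filler;  // undefined, or the one-pointer filler map while
+ //                      // construction counting is still active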
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // rbx: JSObject
- // rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+ // Add the object tag to make the JSObject real, so that we can continue and
+ // jump into the continuation code at any time from now on. Any failures
+ // need to undo the allocation, so that the heap is in a consistent state
+ // and verifiable.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ __ or_(rbx, Immediate(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed.
+ // Allocate and initialize a FixedArray if it is.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ // Calculate the total number of properties described by the map.
+ __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ addq(rdx, rcx);
+ // Calculate unused properties past the end of the in-object properties.
+ __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+ __ subq(rdx, rcx);
+ // Done if no extra properties are to be allocated.
+ __ j(zero, &allocated);
+ __ Assert(positive, "Property allocation count failed.");
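+ // The count computed above, written out (values come from the Map fields
+ // named in the FieldOperands; sketch only):
+ //   extra_props = unused_property_fields + pre_allocated_property_fields
+ //               - in_object_properties;
+ //   extra_props == 0  =>  no out-of-object properties array is needed.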
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // rbx: JSObject
+ // rdi: start of next object (will be start of FixedArray)
+ // rdx: number of elements in properties array
+ __ AllocateInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ rdx,
+ rdi,
+ rax,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
+
+ // Initialize the FixedArray.
+ // rbx: JSObject
+ // rdi: FixedArray
+ // rdx: number of elements
+ // rax: start of next object
+ __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+ __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // set up the map
+ __ Integer32ToSmi(rdx, rdx);
+ __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+
+ // Initialize the fields to undefined.
+ // rbx: JSObject
+ // rdi: FixedArray
+ // rax: start of next object
+ // rdx: number of elements
+ { Label loop, entry;
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(rcx, 0), rdx);
+ __ addq(rcx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(rcx, rax);
+ __ j(below, &loop);
+ }
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // rbx: JSObject
+ // rdi: FixedArray
+ __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
- // Continue with JSObject being successfully allocated
- // rbx: JSObject
- __ jmp(&allocated);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // rbx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(rbx);
- }
+ // Continue with JSObject being successfully allocated
+ // rbx: JSObject
+ __ jmp(&allocated);
- // Allocate the new receiver object using the runtime call.
- // rdi: function (constructor)
- __ bind(&rt_call);
- // Must restore rdi (constructor) before calling runtime.
- __ movq(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ movq(rbx, rax); // store result in rbx
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated object's unused properties.
+ // rbx: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(rbx);
+ }
- // New object allocated.
- // rbx: newly allocated object
- __ bind(&allocated);
- // Retrieve the function from the stack.
- __ pop(rdi);
+ // Allocate the new receiver object using the runtime call.
+ // rdi: function (constructor)
+ __ bind(&rt_call);
+ // Must restore rdi (constructor) before calling runtime.
+ __ movq(rdi, Operand(rsp, 0));
+ __ push(rdi);
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ movq(rbx, rax); // store result in rbx
- // Retrieve smi-tagged arguments count from the stack.
- __ movq(rax, Operand(rsp, 0));
- __ SmiToInteger32(rax, rax);
+ // New object allocated.
+ // rbx: newly allocated object
+ __ bind(&allocated);
+ // Retrieve the function from the stack.
+ __ pop(rdi);
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
+ // Retrieve smi-tagged arguments count from the stack.
+ __ movq(rax, Operand(rsp, 0));
+ __ SmiToInteger32(rax, rax);
- // Setup pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(rbx);
+ __ push(rbx);
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Call the function.
- if (is_api_function) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
- } else {
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Set up a pointer to the last argument.
+ __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ movq(rcx, rax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ bind(&entry);
+ __ decq(rcx);
+ __ j(greater_equal, &loop);
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver);
+ // Call the function.
+ if (is_api_function) {
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &exit);
+ // Restore context from the frame.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0));
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(rax, &use_receiver);
- // Restore the arguments count and leave the construct frame.
- __ bind(&exit);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &exit);
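+ // Taken together, the two checks above implement the [[Construct]] result
+ // rule cited in the comment. Sketch (not generated code; type test written
+ // informally):
+ //   if (!result.IsSmi() && result.instance_type >= FIRST_SPEC_OBJECT_TYPE)
+ //     return result;    // the constructor returned a real object
+ //   return receiver;    // otherwise keep the freshly allocated "this"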
- // Leave construct frame.
- }
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ movq(rax, Operand(rsp, 0));
+
+ // Restore the arguments count and leave the construct frame.
+ __ bind(&exit);
+ __ movq(rbx, Operand(rsp, kPointerSize)); // get arguments count
+ __ LeaveConstructFrame();
// Remove caller arguments from the stack and return.
__ pop(rcx);
@@ -428,108 +413,104 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// - Object*** argv
// (see Handle::Invoke in execution.cc).
- // Open a C++ scope for the FrameScope.
- {
- // Platform specific argument handling. After this, the stack contains
- // an internal frame and the pushed function and receiver, and
- // register rax and rbx holds the argument count and argument array,
- // while rdi holds the function pointer and rsi the context.
-
+ // Platform-specific argument handling. After this, the stack contains
+ // an internal frame and the pushed function and receiver, and
+ // registers rax and rbx hold the argument count and argument array,
+ // while rdi holds the function pointer and rsi the context.
#ifdef _WIN64
- // MSVC parameters in:
- // rcx : entry (ignored)
- // rdx : function
- // r8 : receiver
- // r9 : argc
- // [rsp+0x20] : argv
-
- // Clear the context before we push it when entering the internal frame.
- __ Set(rsi, 0);
- // Enter an internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load the function context into rsi.
- __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
-
- // Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
-
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, r9);
- // Load the previous frame pointer to access C argument on stack
- __ movq(kScratchRegister, Operand(rbp, 0));
- __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
- // Load the function pointer into rdi.
- __ movq(rdi, rdx);
+ // MSVC parameters in:
+ // rcx : entry (ignored)
+ // rdx : function
+ // r8 : receiver
+ // r9 : argc
+ // [rsp+0x20] : argv
+
+ // Clear the context before we push it when entering the JS frame.
+ __ Set(rsi, 0);
+ __ EnterInternalFrame();
+
+ // Load the function context into rsi.
+ __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+
+ // Push the function and the receiver onto the stack.
+ __ push(rdx);
+ __ push(r8);
+
+ // Load the number of arguments and set up a pointer to the arguments.
+ __ movq(rax, r9);
+ // Load the previous frame pointer to access the C arguments on the stack.
+ __ movq(kScratchRegister, Operand(rbp, 0));
+ __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+ // Load the function pointer into rdi.
+ __ movq(rdi, rdx);
#else // _WIN64
- // GCC parameters in:
- // rdi : entry (ignored)
- // rsi : function
- // rdx : receiver
- // rcx : argc
- // r8 : argv
-
- __ movq(rdi, rsi);
- // rdi : function
-
- // Clear the context before we push it when entering the internal frame.
- __ Set(rsi, 0);
- // Enter an internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // GCC parameters in:
+ // rdi : entry (ignored)
+ // rsi : function
+ // rdx : receiver
+ // rcx : argc
+ // r8 : argv
+
+ __ movq(rdi, rsi);
+ // rdi : function
+
+ // Clear the context before we push it when entering the JS frame.
+ __ Set(rsi, 0);
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Push the function and receiver and set up the context.
+ __ push(rdi);
+ __ push(rdx);
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, rcx);
- __ movq(rbx, r8);
+ // Load the number of arguments and set up a pointer to the arguments.
+ __ movq(rax, rcx);
+ __ movq(rbx, r8);
#endif // _WIN64
- // Current stack contents:
- // [rsp + 2 * kPointerSize ... ]: Internal frame
- // [rsp + kPointerSize] : function
- // [rsp] : receiver
- // Current register contents:
- // rax : argc
- // rbx : argv
- // rsi : context
- // rdi : function
-
- // Copy arguments to the stack in a loop.
- // Register rbx points to array of pointers to handle locations.
- // Push the values of these handles.
- Label loop, entry;
- __ Set(rcx, 0); // Set loop variable to 0.
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(not_equal, &loop);
-
- // Invoke the code.
- if (is_construct) {
- // Expects rdi to hold function pointer.
- __ Call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(rax);
- // Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
- // Exit the internal frame. Notice that this also removes the empty
- // context and the function left on the stack by the code
- // invocation.
+ // Current stack contents:
+ // [rsp + 2 * kPointerSize ... ]: Internal frame
+ // [rsp + kPointerSize] : function
+ // [rsp] : receiver
+ // Current register contents:
+ // rax : argc
+ // rbx : argv
+ // rsi : context
+ // rdi : function
+
+ // Copy arguments to the stack in a loop.
+ // Register rbx points to array of pointers to handle locations.
+ // Push the values of these handles.
+ Label loop, entry;
+ __ Set(rcx, 0); // Set loop variable to 0.
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ push(Operand(kScratchRegister, 0)); // dereference handle
+ __ addq(rcx, Immediate(1));
+ __ bind(&entry);
+ __ cmpq(rcx, rax);
+ __ j(not_equal, &loop);
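+ // Each argv slot is a handle location (an Object**), so the loop performs one
+ // extra dereference per argument. Roughly:
+ //   for (int i = 0; i < argc; i++) {
+ //     push(*argv[i]);  // argv[i] is Object**, *argv[i] the Object* to push
+ //   }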
+
+ // Invoke the code.
+ if (is_construct) {
+ // Expects rdi to hold function pointer.
+ __ Call(masm->isolate()->builtins()->JSConstructCall(),
+ RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(rax);
+ // Function must be in rdi.
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
+ // Exit the JS frame. Notice that this also removes the empty
+ // context and the function left on the stack by the code
+ // invocation.
+ __ LeaveInternalFrame();
// TODO(X64): Is argument correct? Is there a receiver to remove?
- __ ret(1 * kPointerSize); // Remove receiver.
+ __ ret(1 * kPointerSize); // remove receiver
}
@@ -545,24 +526,23 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore the function.
+ __ pop(rdi);
- // Tear down internal frame.
- }
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -572,24 +552,23 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore function.
+ __ pop(rdi);
- // Tear down internal frame.
- }
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -600,15 +579,14 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- // Tear down internal frame.
- }
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
// Get the full codegen state from the stack and untag it.
__ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
@@ -645,10 +623,9 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ Pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
+ __ EnterInternalFrame();
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ __ LeaveInternalFrame();
__ Popad();
__ ret(0);
}
@@ -718,21 +695,18 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(above_equal, &shift_arguments);
__ bind(&convert_to_object);
- {
- // Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
-
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ Set(rdx, 0); // indicate regular JS_FUNCTION
+ __ EnterInternalFrame(); // In order to preserve argument count.
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax);
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
- }
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+ __ Set(rdx, 0); // indicate regular JS_FUNCTION
+ __ pop(rax);
+ __ SmiToInteger32(rax, rax);
+ __ LeaveInternalFrame();
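+ // Note on the tagging above: a raw argument count must not sit on the stack
+ // across a call that can trigger GC, so it is smi-tagged before the push and
+ // untagged again after the pop:
+ //   rax -> Smi(rax) -> push ... TO_OBJECT ... pop -> SmiToInteger32 -> rax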
// Restore the function to rdi.
__ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
__ jmp(&patch_receiver, Label::kNear);
@@ -833,162 +807,160 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// rsp+8: arguments
// rsp+16: receiver ("this")
// rsp+24: function
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // rbp: Old base pointer
- // rbp[1]: return address
- // rbp[2]: function arguments
- // rbp[3]: receiver
- // rbp[4]: function
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
-
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
- // Make rdx the space we need for the array when it is unrolled onto the
- // stack.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
-
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
-
- // Get the receiver.
- __ movq(rbx, Operand(rbp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &push_receiver);
-
- // Do not transform the receiver for natives.
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
-
- // If given receiver is already a JavaScript object then there's no
- // reason for converting it.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &push_receiver);
-
- // Convert the receiver to an object.
- __ bind(&call_to_object);
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ jmp(&push_receiver, Label::kNear);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ EnterInternalFrame();
+ // Stack frame:
+ // rbp: Old base pointer
+ // rbp[1]: return address
+ // rbp[2]: function arguments
+ // rbp[3]: receiver
+ // rbp[4]: function
+ static const int kArgumentsOffset = 2 * kPointerSize;
+ static const int kReceiverOffset = 3 * kPointerSize;
+ static const int kFunctionOffset = 4 * kPointerSize;
+
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(Operand(rbp, kArgumentsOffset));
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movq(rcx, rsp);
+ // Make rcx the space we have left. The stack might already have overflowed
+ // here, which will cause rcx to become negative.
+ __ subq(rcx, kScratchRegister);
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmpq(rcx, rdx);
+ __ j(greater, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
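+ // The check above boils down to (signed comparison, so an already overflowed
+ // stack also falls through to APPLY_OVERFLOW):
+ //   space_left   = rsp - real_stack_limit;
+ //   space_needed = number_of_arguments * kPointerSize;
+ //   if (space_left <= space_needed) goto out_of_stack_space;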
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ push(rax); // limit
+ __ push(Immediate(0)); // index
+
+ // Get the receiver.
+ __ movq(rbx, Operand(rbp, kReceiverOffset));
+
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &push_receiver);
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(rbx);
+ // Change context eagerly to get the right global object if necessary.
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(rax);
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_receiver;
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &push_receiver);
+
+ // Do not transform the receiver for natives.
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &push_receiver);
+
+ // Compute the receiver in non-strict mode.
+ __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
+ __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+ __ j(equal, &use_global_receiver);
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &use_global_receiver);
+
+ // If the given receiver is already a JavaScript object, then there is no
+ // reason to convert it.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &push_receiver);
+
+ // Convert the receiver to an object.
+ __ bind(&call_to_object);
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+ __ jmp(&push_receiver, Label::kNear);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
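+ // Receiver selection above, summarised as a sketch (applies only to
+ // non-strict, non-native callees):
+ //   receiver = (r is null || r is undefined) ? global_receiver
+ //            : (r is a spec object)          ? r
+ //            :                                 ToObject(r);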
+
+ // Push the receiver.
+ __ bind(&push_receiver);
+ __ push(rbx);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
+
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
+
+ // Push the nth argument.
+ __ push(rax);
- // Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
+ // Update the index on the stack and in register rax.
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+ __ movq(Operand(rbp, kIndexOffset), rax);
- __ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
- __ j(not_equal, &loop);
+ __ bind(&entry);
+ __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ j(not_equal, &loop);
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(rax);
- __ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Invoke the function.
+ Label call_proxy;
+ ParameterCount actual(rax);
+ __ SmiToInteger32(rax, rax);
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &call_proxy);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
- frame_scope.GenerateLeaveFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ __ LeaveInternalFrame();
+ __ ret(3 * kPointerSize); // remove this, receiver, and arguments
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(rdi); // add function proxy as last argument
- __ incq(rax);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ // Invoke the function proxy.
+ __ bind(&call_proxy);
+ __ push(rdi); // add function proxy as last argument
+ __ incq(rax);
+ __ Set(rbx, 0);
+ __ SetCallKind(rcx, CALL_AS_METHOD);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+ __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
- // Leave internal frame.
- }
+ __ LeaveInternalFrame();
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
}
@@ -1548,11 +1520,10 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- }
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ LeaveInternalFrame();
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
@@ -1570,9 +1541,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
StackCheckStub stub;
__ TailCallStub(&stub);
- if (FLAG_debug_code) {
- __ Abort("Unreachable code: returned from tail call.");
- }
+ __ Abort("Unreachable code: returned from tail call.");
__ bind(&ok);
__ ret(0);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 7d41ffe539..df4438b734 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -155,70 +155,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + (1 * kPointerSize)]: function
- // [rsp + (2 * kPointerSize)]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
- // Get the serialized scope info from the stack.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
-
- // Setup the object header.
- __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // If this block context is nested in the global context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ cmpq(rcx, Immediate(0));
- __ Assert(equal, message);
- }
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
- __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Setup the fixed slots.
- __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
- __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
- __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
-
- // Copy the global object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(2 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@@ -297,8 +233,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
Label patch;
const Register argument = rax;
const Register map = rdx;
@@ -394,25 +328,6 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
}
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- __ PushCallerSaved(save_doubles_);
- const int argument_count = 1;
- __ PrepareCallCFunction(argument_count);
-#ifdef _WIN64
- __ LoadAddress(rcx, ExternalReference::isolate_address());
-#else
- __ LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- __ PopCallerSaved(save_doubles_);
- __ ret(0);
-}
-
-
void ToBooleanStub::CheckOddball(MacroAssembler* masm,
Type type,
Heap::RootListIndex value,
@@ -707,13 +622,12 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rcx, rax);
- __ pop(rax);
- }
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ movq(rcx, rax);
+ __ pop(rax);
+ __ LeaveInternalFrame();
__ bind(&heapnumber_allocated);
// rcx: allocated 'empty' number
@@ -837,10 +751,6 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -1543,12 +1453,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ addq(rsp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Allocate an unused object bigger than a HeapNumber.
- __ Push(Smi::FromInt(2 * kDoubleSize));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
+ __ EnterInternalFrame();
+ // Allocate an unused object bigger than a HeapNumber.
+ __ Push(Smi::FromInt(2 * kDoubleSize));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
__ Ret();
}
@@ -1564,11 +1473,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&runtime_call);
__ AllocateHeapNumber(rax, rdi, &skip_cache);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(RuntimeFunction(), 1);
- }
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -2438,6 +2346,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
// Stack frame on entry.
// rsp[0]: return address
@@ -2758,18 +2670,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store last subject and last input.
__ movq(rax, Operand(rsp, kSubjectOffset));
__ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ RecordWriteField(rbx,
- RegExpImpl::kLastSubjectOffset,
- rax,
- rdi,
- kDontSaveFPRegs);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
__ movq(rax, Operand(rsp, kSubjectOffset));
__ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
- __ RecordWriteField(rbx,
- RegExpImpl::kLastInputOffset,
- rax,
- rdi,
- kDontSaveFPRegs);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
// Get the static offsets vector filled by the native regexp code.
__ LoadAddress(rcx,
@@ -3325,22 +3231,6 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
-void CallFunctionStub::FinishCode(Code* code) {
- code->set_has_function_cache(false);
-}
-
-
-void CallFunctionStub::Clear(Heap* heap, Address address) {
- UNREACHABLE();
-}
-
-
-Object* CallFunctionStub::GetCachedValue(Address address) {
- UNREACHABLE();
- return NULL;
-}
-
-
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow, non_function;
@@ -3429,35 +3319,6 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
-#ifdef _WIN64
- return result_size_ == 1;
-#else
- return true;
-#endif
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
- // It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
-}
-
-
-void CodeStub::GenerateFPStubs() {
-}
-
-
-void CEntryStub::GenerateAheadOfTime() {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- CEntryStub save_doubles(1, kSaveFPRegs);
- save_doubles.GetCode()->set_is_pregenerated(true);
-}
-
-
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Throw exception in eax.
__ Throw(rax);
@@ -3896,7 +3757,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
} else {
- // Get return address and delta to inlined map check.
__ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
__ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
__ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
@@ -3931,11 +3791,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Store offset of true in the root array at the inline check site.
- int true_offset = 0x100 +
- (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
- // Assert it is a 1-byte signed value.
- ASSERT(true_offset >= 0 && true_offset < 0x100);
- __ movl(rax, Immediate(true_offset));
+ ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
+ == 0xB0 - 0x100);
+ __ movl(rax, Immediate(0xB0)); // TrueValue is at -10 * kPointerSize.
__ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
__ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
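// The magic value decodes as follows: stored as a byte, 0xB0 is read back as
// the signed displacement -0x50 = -80 = -10 * kPointerSize, which is exactly
// (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias, as the
// ASSERT above pins down. The false case below encodes 0xB8 (-72, i.e.
// -9 * kPointerSize) the same way.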
@@ -3954,11 +3812,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Store offset of false in the root array at the inline check site.
- int false_offset = 0x100 +
- (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
- // Assert it is a 1-byte signed value.
- ASSERT(false_offset >= 0 && false_offset < 0x100);
- __ movl(rax, Immediate(false_offset));
+ ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
+ == 0xB8 - 0x100);
+ __ movl(rax, Immediate(0xB8)); // FalseValue is at -9 * kPointerSize.
__ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
__ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
@@ -4080,23 +3936,22 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Heap::kEmptyStringRootIndex);
__ j(not_equal, &call_runtime_);
// Get the first of the two strings and load its instance type.
- ASSERT(!kScratchRegister.is(scratch_));
- __ movq(kScratchRegister, FieldOperand(object_, ConsString::kFirstOffset));
+ __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string, Label::kNear);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
__ addq(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
- __ movq(kScratchRegister, FieldOperand(object_, SlicedString::kParentOffset));
+ __ movq(object_, FieldOperand(object_, SlicedString::kParentOffset));
__ bind(&assure_seq_string);
- __ movq(result_, FieldOperand(kScratchRegister, HeapObject::kMapOffset));
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the first cons component is also non-flat, then go to runtime.
STATIC_ASSERT(kSeqStringTag == 0);
__ testb(result_, Immediate(kStringRepresentationMask));
__ j(not_zero, &call_runtime_);
- __ movq(object_, kScratchRegister);
+ __ jmp(&flat_string);
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
@@ -5416,13 +5271,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdx);
- __ push(rax);
- __ Push(Smi::FromInt(op_));
- __ CallExternalReference(miss, 3);
- }
+ __ EnterInternalFrame();
+ __ push(rdx);
+ __ push(rax);
+ __ Push(Smi::FromInt(op_));
+ __ CallExternalReference(miss, 3);
+ __ LeaveInternalFrame();
// Compute the entry point of the rewritten stub.
__ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
@@ -5553,8 +5407,6 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
// Stack frame on entry:
// rsp[0 * kPointerSize]: return address.
// rsp[1 * kPointerSize]: key's hash.
@@ -5640,279 +5492,6 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { rbx, rax, rdi, EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { rbx, rcx, rdx, OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField and
- // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { rdx, rcx, rbx, EMIT_REMEMBERED_SET },
- // GenerateStoreField calls the stub with two different permutations of
- // registers. This is the second.
- { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
- // StoreIC::GenerateNormal via GenerateDictionaryStore.
- { rbx, r8, r9, EMIT_REMEMBERED_SET },
- // KeyedStoreIC::GenerateGeneric.
- { rbx, rdx, rcx, EMIT_REMEMBERED_SET},
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { rdi, rdx, rcx, EMIT_REMEMBERED_SET},
- // Null termination.
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
-};
-
-
-bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode()->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- }
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instructions (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
- __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
- __ jmp(&skip_to_incremental_compacting, Label::kFar);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- masm->set_byte_at(0, kTwoByteNopInstruction);
- masm->set_byte_at(2, kFiveByteNopInstruction);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(),
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- not_zero,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ ret(0);
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
-#ifdef _WIN64
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
- Register address =
- arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(arg1));
- __ Move(address, regs_.address());
- __ Move(arg1, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- // TODO(gc) Can we just set address arg2 in the beginning?
- __ Move(arg2, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ movq(arg2, Operand(address, 0));
- }
- __ LoadAddress(arg3, ExternalReference::isolate_address());
- int argument_count = 3;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label on_black;
- Label need_incremental;
- Label need_incremental_pop_object;
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(),
- regs_.scratch0(),
- regs_.scratch1(),
- &on_black,
- Label::kNear);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&on_black);
-
- // Get the value from the slot.
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- zero,
- &ensure_not_white,
- Label::kNear);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- zero,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need an extra register for this, so we push the object register
- // temporarily.
- __ push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
- __ pop(regs_.object());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
#undef __
} } // namespace v8::internal
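
Aside (not part of the patch, register ids below are illustrative stand-ins): the removed InformIncrementalMarker above selects its C-call argument registers with an #ifdef because the Win64 and System V x64 ABIs disagree on which registers carry the first three integer arguments. A minimal standalone sketch of that selection:

    #include <cstdio>

    // Illustrative register ids only; the real code uses the assembler's
    // Register objects (rdi, rsi, rdx, rcx, r8).
    enum Reg { RDI, RSI, RDX, RCX, R8 };

    struct CallArgs { Reg arg1, arg2, arg3; };

    int main() {
    #ifdef _WIN64
      // Win64: first three integer arguments in rcx, rdx, r8.
      CallArgs args = { RCX, RDX, R8 };
    #else
      // System V (Linux/macOS): first three integer arguments in rdi, rsi, rdx.
      CallArgs args = { RDI, RSI, RDX };
    #endif
      std::printf("arg1=%d arg2=%d arg3=%d\n", args.arg1, args.arg2, args.arg3);
      return 0;
    }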
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 698ba403cd..4058118eef 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -59,32 +59,6 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated() { return true; }
- static void GenerateFixedRegStubsAheadOfTime();
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -439,8 +413,6 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -453,7 +425,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryLookup; }
+ Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
return DictionaryBits::encode(dictionary_.code()) |
@@ -474,253 +446,6 @@ class StringDictionaryLookupStub: public CodeStub {
};
-class RecordWriteStub: public CodeStub {
- public:
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
- static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
-
- static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
- static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
-
- static Mode GetMode(Code* stub) {
- byte first_instruction = stub->instruction_start()[0];
- byte second_instruction = stub->instruction_start()[2];
-
- if (first_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL;
- }
-
- ASSERT(first_instruction == kTwoByteNopInstruction);
-
- if (second_instruction == kFiveByteJumpInstruction) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(second_instruction == kFiveByteNopInstruction);
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteNopInstruction;
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteJumpInstruction;
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteJumpInstruction;
- break;
- }
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 7);
- }
-
- private:
- // This is a helper class for freeing up 3 scratch registers, where the third
- // is always rcx (needed for shift operations). The input is two registers
- // that must be preserved and one scratch register provided by the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_orig_(object),
- address_orig_(address),
- scratch0_orig_(scratch0),
- object_(object),
- address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
- if (scratch0.is(rcx)) {
- scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
- }
- if (object.is(rcx)) {
- object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
- }
- if (address.is(rcx)) {
- address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
- }
- ASSERT(!AreAliased(scratch0_, object_, address_, rcx));
- }
-
- void Save(MacroAssembler* masm) {
- ASSERT(!address_orig_.is(object_));
- ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
- // We don't have to save scratch0_orig_ because it was given to us as
- // a scratch register. But if we had to switch to a different reg then
- // we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
- if (!rcx.is(scratch0_orig_) &&
- !rcx.is(object_orig_) &&
- !rcx.is(address_orig_)) {
- masm->push(rcx);
- }
- masm->push(scratch1_);
- if (!address_.is(address_orig_)) {
- masm->push(address_);
- masm->movq(address_, address_orig_);
- }
- if (!object_.is(object_orig_)) {
- masm->push(object_);
- masm->movq(object_, object_orig_);
- }
- }
-
- void Restore(MacroAssembler* masm) {
- // These will have been preserved the entire time, so we just need to move
- // them back. Only in one case is the orig_ reg different from the plain
- // one, since only one of them can alias with rcx.
- if (!object_.is(object_orig_)) {
- masm->movq(object_orig_, object_);
- masm->pop(object_);
- }
- if (!address_.is(address_orig_)) {
- masm->movq(address_orig_, address_);
- masm->pop(address_);
- }
- masm->pop(scratch1_);
- if (!rcx.is(scratch0_orig_) &&
- !rcx.is(object_orig_) &&
- !rcx.is(address_orig_)) {
- masm->pop(rcx);
- }
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved.
-
- // The three scratch registers (incl. rcx) will be restored by other means
- // so we don't bother pushing them here. Rbx, rbp and r12-15 are callee
- // save and don't need to be preserved.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_orig_;
- Register address_orig_;
- Register scratch0_orig_;
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
- // Third scratch register is always rcx.
-
- Register GetRegThatIsNotRcxOr(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(rcx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
-
- bool MustBeInStubCache() {
-    // All stubs must be registered in the stub cache,
-    // otherwise the IncrementalMarker would not be able to
-    // find and patch them.
- return true;
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 4> {};
- class ValueBits: public BitField<int, 4, 4> {};
- class AddressBits: public BitField<int, 8, 4> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- Label slow_;
- RegisterAllocation regs_;
-};
-
-
} } // namespace v8::internal
#endif // V8_X64_CODE_STUBS_X64_H_
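
Aside (not part of the patch): the removed RecordWriteStub encodes its current mode in the first bytes of its own generated code, so GetMode only has to inspect two instruction bytes and Patch only has to flip them between short "nops" and short jumps. A standalone sketch of the decoding logic, reusing the same byte constants:

    #include <cassert>
    #include <cstdint>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    const uint8_t kTwoByteNop   = 0x3c;  // cmpb al, #imm8 (harmless 2-byte filler)
    const uint8_t kTwoByteJump  = 0xeb;  // jmp #imm8
    const uint8_t kFiveByteNop  = 0x3d;  // cmpl eax, #imm32 (harmless 5-byte filler)
    const uint8_t kFiveByteJump = 0xe9;  // jmp #imm32

    Mode GetMode(const uint8_t* code) {
      if (code[0] == kTwoByteJump) return INCREMENTAL;
      assert(code[0] == kTwoByteNop);
      if (code[2] == kFiveByteJump) return INCREMENTAL_COMPACTION;
      assert(code[2] == kFiveByteNop);
      return STORE_BUFFER_ONLY;
    }

    int main() {
      uint8_t store_buffer_only[] = { kTwoByteNop,  0, kFiveByteNop,  0, 0, 0, 0 };
      uint8_t incremental[]       = { kTwoByteJump, 0, kFiveByteNop,  0, 0, 0, 0 };
      uint8_t compaction[]        = { kTwoByteNop,  0, kFiveByteJump, 0, 0, 0, 0 };
      assert(GetMode(store_buffer_only) == STORE_BUFFER_ONLY);
      assert(GetMode(incremental) == INCREMENTAL);
      assert(GetMode(compaction) == INCREMENTAL_COMPACTION);
      return 0;
    }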
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index f6102c7c73..507bbd44c3 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -38,16 +38,12 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
+ masm->EnterInternalFrame();
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
+ masm->LeaveInternalFrame();
}
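
Aside (not part of the patch, with simplified stand-in types): the hunk above swaps the newer RAII-style frame bookkeeping (FrameScope plus a has_frame flag) back to explicit EnterInternalFrame/LeaveInternalFrame calls. A toy sketch of why the scope object is the safer pattern:

    #include <cassert>

    struct MacroAssembler {
      bool has_frame = false;
      void EnterInternalFrame() { has_frame = true; }
      void LeaveInternalFrame() { has_frame = false; }
    };

    // RAII wrapper: the frame is guaranteed to be closed when the scope ends,
    // and the assembler can assert has_frame before emitting calls.
    class FrameScope {
     public:
      explicit FrameScope(MacroAssembler* masm) : masm_(masm) {
        masm_->EnterInternalFrame();
      }
      ~FrameScope() { masm_->LeaveInternalFrame(); }
     private:
      MacroAssembler* masm_;
    };

    int main() {
      MacroAssembler masm;
      {
        FrameScope scope(&masm);
        assert(masm.has_frame);
      }
      assert(!masm.has_frame);
      return 0;
    }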
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 2149fc2d14..423e6f2441 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -100,66 +100,65 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList non_object_regs,
bool convert_call_to_jmp) {
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
-  // are stored as two smis, causing them to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- ASSERT(!reg.is(kScratchRegister));
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- // Store the 64-bit value as two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ movq(kScratchRegister, reg);
- __ Integer32ToSmi(reg, reg);
- __ push(reg);
- __ sar(kScratchRegister, Immediate(32));
- __ Integer32ToSmi(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
- }
+ __ EnterInternalFrame();
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non object values
+  // are stored as two smis, causing them to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ ASSERT(!reg.is(kScratchRegister));
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
}
+ // Store the 64-bit value as two smis.
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ movq(kScratchRegister, reg);
+ __ Integer32ToSmi(reg, reg);
+ __ push(reg);
+ __ sar(kScratchRegister, Immediate(32));
+ __ Integer32ToSmi(kScratchRegister, kScratchRegister);
+ __ push(kScratchRegister);
+ }
+ }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ Set(rax, 0); // No arguments (argc == 0).
- __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, kDebugZapValue);
- }
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- }
- // Reconstruct the 64-bit value from two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ shl(kScratchRegister, Immediate(32));
- __ pop(reg);
- __ SmiToInteger32(reg, reg);
- __ or_(reg, kScratchRegister);
- }
+ __ Set(rax, 0); // No arguments (argc == 0).
+ __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if (FLAG_debug_code) {
+ __ Set(reg, kDebugZapValue);
+ }
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ }
+ // Reconstruct the 64-bit value from two smis.
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, kScratchRegister);
+ __ shl(kScratchRegister, Immediate(32));
+ __ pop(reg);
+ __ SmiToInteger32(reg, reg);
+ __ or_(reg, kScratchRegister);
}
-
- // Get rid of the internal frame.
}
+ // Get rid of the internal frame.
+ __ LeaveInternalFrame();
+
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
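
Aside (not part of the patch): both versions of Generate_DebugBreakCallHelper push raw 64-bit register values as two smis so the GC cannot misread the raw bits as heap pointers. A standalone sketch of that round trip, assuming the x64 smi layout (32-bit payload in the upper half of the word, all-zero tag in the lower half):

    #include <cassert>
    #include <cstdint>

    // On x64, a smi holds its 32-bit integer in the upper half of the word.
    uint64_t Integer32ToSmi(uint32_t value) {
      return static_cast<uint64_t>(value) << 32;
    }

    uint32_t SmiToInteger32(uint64_t smi) {
      return static_cast<uint32_t>(smi >> 32);
    }

    int main() {
      uint64_t raw = 0xDEADBEEFCAFEBABEull;  // arbitrary non-pointer bits
      uint64_t low_smi  = Integer32ToSmi(static_cast<uint32_t>(raw));
      uint64_t high_smi = Integer32ToSmi(static_cast<uint32_t>(raw >> 32));
      // Both smis can now sit on the expression stack across a GC.
      uint64_t restored =
          (static_cast<uint64_t>(SmiToInteger32(high_smi)) << 32) |
          SmiToInteger32(low_smi);
      assert(restored == raw);
      return 0;
    }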
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index b7e334ee75..b52e659320 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -197,19 +197,13 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Destroy the code which is not supposed to run again.
ZapCodeRange(previous_pc, jump_table_address);
#endif
- Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
+ DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -226,8 +220,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@@ -257,13 +250,6 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
*(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
-
- RelocInfo rinfo(call_target_address,
- RelocInfo::CODE_TARGET,
- 0,
- unoptimized_code);
- unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- unoptimized_code, &rinfo, replacement_code);
}
@@ -282,8 +268,6 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address,
check_code->entry());
- check_code->GetHeap()->incremental_marking()->
- RecordCodeTargetPatch(call_target_address, check_code);
}
@@ -729,10 +713,7 @@ void Deoptimizer::EntryGenerator::Generate() {
Isolate* isolate = masm()->isolate();
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
- }
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
__ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
@@ -778,11 +759,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ PrepareCallCFunction(2);
__ movq(arg1, rax);
__ LoadAddress(arg2, ExternalReference::isolate_address());
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 2);
- }
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 2);
__ pop(rax);
// Replace the current frame with the output frames.
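
Aside (not part of the patch, constants here are illustrative): PatchStackCheckCodeAt and RevertStackCheckCodeAt rewrite an existing rel32 call operand in place; the downgrade only drops the extra bookkeeping that told the incremental marker about the patched code object. A toy sketch of the operand rewrite on a plain byte buffer:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    const int kIntSize = 4;

    // Overwrite the 32-bit operand of a rel32 call whose end is at pc_after.
    void SetCallTarget(uint8_t* pc_after, int32_t new_rel32) {
      std::memcpy(pc_after - kIntSize, &new_rel32, kIntSize);
    }

    int main() {
      // Two prefix bytes, then e8 (call rel32) and its 4-byte operand.
      uint8_t code[] = { 0x90, 0x90, 0xe8, 0x00, 0x00, 0x00, 0x00 };
      uint8_t* pc_after = code + sizeof(code);
      SetCallTarget(pc_after, 0x1234);
      int32_t rel32;
      std::memcpy(&rel32, pc_after - kIntSize, kIntSize);
      assert(rel32 == 0x1234);
      return 0;
    }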
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index b5c5fc5e71..556523fada 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -147,11 +147,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
@@ -200,9 +195,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers rax and rbx.
- __ RecordWriteContextSlot(
- rsi, context_offset, rax, rbx, kDontSaveFPRegs);
+ // Update the write barrier. This clobbers all involved
+      // registers, so we have to use a third register to avoid
+ // clobbering rsi.
+ __ movq(rcx, rsi);
+ __ RecordWrite(rcx, context_offset, rax, rbx);
}
}
}
@@ -254,7 +251,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), CONST, NULL, &ignored);
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -641,11 +638,10 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ movq(location, src);
-
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
+ __ RecordWrite(scratch0, offset, src, scratch1);
}
}
@@ -677,7 +673,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- VariableMode mode,
+ Variable::Mode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
@@ -695,7 +691,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ movq(StackOperand(variable), result_register());
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(StackOperand(variable), kScratchRegister);
@@ -719,16 +715,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
VisitForAccumulatorValue(function);
__ movq(ContextOperand(rsi, variable->index()), result_register());
int offset = Context::SlotOffset(variable->index());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(rsi,
- offset,
- result_register(),
- rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ movq(rbx, rsi);
+ __ RecordWrite(rbx, offset, result_register(), rcx);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
@@ -742,8 +732,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ push(rsi);
__ Push(variable->name());
// Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == VAR || mode == CONST || mode == LET);
- PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE;
+ ASSERT(mode == Variable::VAR ||
+ mode == Variable::CONST ||
+ mode == Variable::LET);
+ PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
__ Push(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -751,7 +743,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
// must not destroy the current value.
if (function != NULL) {
VisitForStackValue(function);
- } else if (mode == CONST || mode == LET) {
+ } else if (mode == Variable::CONST || mode == Variable::LET) {
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
@@ -1176,21 +1168,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == DYNAMIC_GLOBAL) {
+ if (var->mode() == Variable::DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
+ } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST || local->mode() == LET) {
+ if (local->mode() == Variable::CONST) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
- if (local->mode() == CONST) {
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- } else { // LET
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
__ jmp(done);
}
@@ -1221,7 +1208,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::LOCAL:
case Variable::CONTEXT: {
Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
- if (var->mode() != LET && var->mode() != CONST) {
+ if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
context()->Plug(var);
} else {
// Let and const need a read barrier.
@@ -1229,10 +1216,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(rax, var);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET) {
+ if (var->mode() == Variable::LET) {
__ Push(var->name());
__ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else { // CONST
+ } else { // Variable::CONST
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
__ bind(&done);
@@ -1458,23 +1445,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
- __ movq(r8, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rbx, FieldOperand(r8, JSObject::kElementsOffset));
+ __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
+ __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ movq(FieldOperand(rbx, offset), result_register());
- Label no_map_change;
- __ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store.
- __ RecordWriteField(rbx, offset, result_register(), rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
- __ CheckFastSmiOnlyElements(rdi, &no_map_change, Label::kNear);
- __ push(r8);
- __ CallRuntime(Runtime::kNonSmiElementStored, 1);
- __ bind(&no_map_change);
+ __ RecordWrite(rbx, offset, result_register(), rcx);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1779,7 +1756,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(rax); // Value.
@@ -1800,12 +1777,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ movq(location, rax);
if (var->IsContextSlot()) {
__ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+ __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
}
}
- } else if (var->mode() != CONST) {
+ } else if (var->mode() != Variable::CONST) {
// Assignment to var or initializing assignment to let.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, rcx);
@@ -1819,8 +1795,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ movq(location, rax);
if (var->IsContextSlot()) {
__ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+ __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
}
} else {
ASSERT(var->IsLookupSlot());
@@ -2029,8 +2004,10 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
// Push the strict mode flag. In harmony mode every eval call
// is a strict mode eval call.
- StrictModeFlag strict_mode =
- FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
+ StrictModeFlag strict_mode = strict_mode_flag();
+ if (FLAG_harmony_block_scoping) {
+ strict_mode = kStrictMode;
+ }
__ Push(Smi::FromInt(strict_mode));
__ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
@@ -2072,7 +2049,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// context lookup in the runtime system.
Label done;
Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
+ if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
Label slow;
EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
// Push the function and resolve eval.
@@ -2568,24 +2545,20 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
// Map is now in rax.
__ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
+
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &function);
+
+ // Check if the constructor in the map is a function.
__ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &non_function_constructor);
@@ -2753,7 +2726,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
__ movq(rdx, rax);
- __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
+ __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
__ bind(&done);
context()->Plug(rax);
@@ -3037,33 +3010,14 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ movq(Operand(index_2, 0), object);
__ movq(Operand(index_1, 0), temp);
- Label no_remembered_set;
- __ CheckPageFlag(elements,
- temp,
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- not_zero,
- &no_remembered_set,
- Label::kNear);
- // Possible optimization: do a check that both values are Smis
-  // (OR them together and test against the smi mask).
-
- // We are swapping two objects in an array and the incremental marker never
- // pauses in the middle of scanning a single object. Therefore the
- // incremental marker is not disturbed, so we don't need to call the
- // RecordWrite stub that notifies the incremental marker.
- __ RememberedSetHelper(elements,
- index_1,
- temp,
- kDontSaveFPRegs,
- MacroAssembler::kFallThroughAtEnd);
- __ RememberedSetHelper(elements,
- index_2,
- temp,
- kDontSaveFPRegs,
- MacroAssembler::kFallThroughAtEnd);
-
- __ bind(&no_remembered_set);
+ Label new_space;
+ __ InNewSpace(elements, temp, equal, &new_space);
+
+ __ movq(object, elements);
+ __ RecordWriteHelper(object, index_1, temp);
+ __ RecordWriteHelper(elements, index_2, temp);
+ __ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
__ addq(rsp, Immediate(3 * kPointerSize));
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3879,14 +3833,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
VisitForTypeofValue(expr);
}
@@ -3925,11 +3875,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(rax, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
- __ j(equal, if_true);
- __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
- Split(equal, if_true, if_false, fall_through);
+ STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx);
+ Split(above_equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(rax, if_false);
if (!FLAG_harmony_typeof) {
@@ -3947,7 +3895,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
- context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
@@ -3955,10 +3914,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
Label materialize_true, materialize_false;
@@ -3968,6 +3923,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+ context()->Plug(if_true, if_false);
+ return;
+ }
+
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
@@ -4055,9 +4017,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Comment cmnt(masm_, "[ CompareToNull");
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4065,20 +4026,14 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(sub_expr);
+ VisitForAccumulatorValue(expr->expression());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ CompareRoot(rax, nil_value);
- if (expr->op() == Token::EQ_STRICT) {
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ if (expr->is_strict()) {
Split(equal, if_true, if_false, fall_through);
} else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
__ j(equal, if_true);
- __ CompareRoot(rax, other_nil_value);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
__ JumpIfSmi(rax, if_false);
// It can be an undetectable object.
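
Aside (not part of the patch, Heap type below is made up): the many RecordWriteContextSlot/RecordWriteField-to-RecordWrite rewrites above swap the newer barrier, which also feeds the incremental marker, for the older purely generational one. Conceptually, a generational write barrier just records slots that may now hold old-to-new pointers so the next scavenge can find them; a minimal sketch:

    #include <cstdint>
    #include <unordered_set>

    struct Heap {
      std::unordered_set<uintptr_t> remembered_set;  // slots with old->new refs

      bool InNewSpace(uintptr_t value) const {
        // Placeholder predicate; the real heap checks the page the value is on.
        return (value & 1) == 0 && value >= 0x1000 && value < 0x2000;
      }

      // Called after "*slot = value" for a slot inside an old-space object.
      void RecordWrite(uintptr_t* slot, uintptr_t value) {
        if (InNewSpace(value)) {
          remembered_set.insert(reinterpret_cast<uintptr_t>(slot));
        }
      }
    };

    int main() {
      Heap heap;
      uintptr_t old_object_field = 0;
      uintptr_t young_value = 0x1008;  // pretend this lives in new space
      old_object_field = young_value;
      heap.RecordWrite(&old_object_field, young_value);
      return heap.remembered_set.size() == 1 ? 0 : 1;
    }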
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 27a96674cf..9d55594dcb 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -221,7 +221,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update write barrier. Make sure not to clobber the value.
__ movq(scratch0, value);
- __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
+ __ RecordWrite(elements, scratch1, scratch0);
}
@@ -606,40 +606,45 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double;
- Label fast_object_with_map_check, fast_object_without_map_check;
- Label fast_double_with_map_check, fast_double_without_map_check;
+ Label slow, slow_with_tagged_index, fast, array, extra;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver.
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(r9, Map::kBitFieldOffset),
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow_with_tagged_index);
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &slow_with_tagged_index);
__ SmiToInteger32(rcx, rcx);
- __ CmpInstanceType(r9, JS_ARRAY_TYPE);
+ __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
__ j(equal, &array);
// Check that the object is some kind of JSObject.
- __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
__ j(below, &slow);
+ __ CmpInstanceType(rbx, JS_PROXY_TYPE);
+ __ j(equal, &slow);
+ __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, &slow);
// Object case: Check key against length in the elements array.
// rax: value
// rdx: JSObject
// rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check array bounds.
+ // Check that the object is in fast mode and writable.
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &slow);
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
// rbx: FixedArray
// rcx: index
- __ j(above, &fast_object_with_map_check);
+ __ j(above, &fast);
// Slow case: call runtime.
__ bind(&slow);
@@ -661,20 +666,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
__ j(below_equal, &slow);
// Increment index to get new length.
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &check_extra_double);
__ leal(rdi, Operand(rcx, 1));
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- __ jmp(&fast_object_without_map_check);
-
- __ bind(&check_extra_double);
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, &slow);
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- __ jmp(&fast_double_without_map_check);
+ __ jmp(&fast);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@@ -684,6 +678,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rdx: receiver (a JSArray)
// rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &slow);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
@@ -691,45 +688,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ j(below_equal, &extra);
// Fast case: Do the store.
- __ bind(&fast_object_with_map_check);
+ __ bind(&fast);
// rax: value
// rbx: receiver's elements array (a FixedArray)
// rcx: index
- // rdx: receiver (a JSArray)
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &fast_double_with_map_check);
- __ bind(&fast_object_without_map_check);
- // Smi stores don't require further checks.
Label non_smi_value;
- __ JumpIfNotSmi(rax, &non_smi_value);
- // It's irrelevant whether array is smi-only or not when writing a smi.
__ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
+ __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
__ ret(0);
-
__ bind(&non_smi_value);
- // Writing a non-smi, check whether array allows non-smi elements.
- // r9: receiver's map
- __ CheckFastObjectElements(r9, &slow, Label::kNear);
- __ lea(rcx,
- FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
+ // Slow case that needs to retain rcx for use by RecordWrite.
+ // Update write barrier for the elements array address.
__ movq(rdx, rax);
- __ RecordWrite(
- rbx, rcx, rdx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(&fast_double_with_map_check);
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, &slow);
- __ bind(&fast_double_without_map_check);
- // If the value is a number, store it as a double in the FastDoubleElements
- // array.
- __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0, &slow);
+ __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
__ ret(0);
}
@@ -874,22 +846,21 @@ static void GenerateCallMiss(MacroAssembler* masm,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
- // Push the receiver and the name of the function.
- __ push(rdx);
- __ push(rcx);
+ // Push the receiver and the name of the function.
+ __ push(rdx);
+ __ push(rcx);
- // Call the entry.
- CEntryStub stub(1);
- __ Set(rax, 2);
- __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
- __ CallStub(&stub);
+ // Call the entry.
+ CEntryStub stub(1);
+ __ Set(rax, 2);
+ __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
+ __ CallStub(&stub);
- // Move result to rdi and exit the internal frame.
- __ movq(rdi, rax);
- }
+ // Move result to rdi and exit the internal frame.
+ __ movq(rdi, rax);
+ __ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@@ -1031,14 +1002,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rcx); // save the key
- __ push(rdx); // pass the receiver
- __ push(rcx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(rcx); // restore the key
- }
+ __ EnterInternalFrame();
+ __ push(rcx); // save the key
+ __ push(rdx); // pass the receiver
+ __ push(rcx); // pass the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(rcx); // restore the key
+ __ LeaveInternalFrame();
__ movq(rdi, rax);
__ jmp(&do_call);
@@ -1242,12 +1212,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ movq(mapped_location, rax);
__ lea(r9, mapped_location);
__ movq(r8, rax);
- __ RecordWrite(rbx,
- r9,
- r8,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
+ __ RecordWrite(rbx, r9, r8);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in rbx.
@@ -1256,12 +1221,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ movq(unmapped_location, rax);
__ lea(r9, unmapped_location);
__ movq(r8, rax);
- __ RecordWrite(rbx,
- r9,
- r8,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
+ __ RecordWrite(rbx, r9, r8);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
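
Aside (not part of the patch, the tagging scheme shown is simplified): in both KeyedStoreIC paths above, a smi can be stored without any write barrier because a smi is never a heap pointer the collector has to trace; only the non-smi path falls through to RecordWrite. A standalone sketch of that tag test:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kSmiTagMask = 1;  // low bit 0 => smi, 1 => heap object

    bool IsSmi(uintptr_t value) { return (value & kSmiTagMask) == 0; }

    static int barrier_calls = 0;
    static void CountBarrier(uintptr_t*) { ++barrier_calls; }

    void StoreElement(uintptr_t* elements, int index, uintptr_t value,
                      void (*record_write)(uintptr_t* slot)) {
      elements[index] = value;
      // Only heap objects can create pointers the GC must know about.
      if (!IsSmi(value)) record_write(&elements[index]);
    }

    int main() {
      uintptr_t elements[4] = {0};
      StoreElement(elements, 0, 42u << 1, CountBarrier);  // smi: no barrier
      StoreElement(elements, 1, 0x1001, CountBarrier);    // "pointer": barrier
      assert(barrier_calls == 1);
      return 0;
    }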
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 45aaad7549..9064a266e7 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -81,12 +81,6 @@ bool LCodeGen::GenerateCode() {
HPhase phase("Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -223,8 +217,11 @@ bool LCodeGen::GeneratePrologue() {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers rax and rbx.
- __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
+ // Update the write barrier. This clobbers all involved
+      // registers, so we have to use a third register to avoid
+ // clobbering rsi.
+ __ movq(rcx, rsi);
+ __ RecordWrite(rcx, context_offset, rax, rbx);
}
}
Comment(";;; End allocate local context");
@@ -283,9 +280,6 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
- Comment(";;; Deferred code @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
@@ -673,7 +667,7 @@ void LCodeGen::RecordSafepoint(
int deoptimization_index) {
ASSERT(kind == expected_safepoint_kind_);
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
@@ -1583,33 +1577,30 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
+
int false_block = chunk_->LookupDestination(instr->false_block_id());
- // If the expression is known to be untagged or a smi, then it's definitely
-  // not null, and it can't be an undetectable object.
if (instr->hydrogen()->representation().IsSpecialization() ||
instr->hydrogen()->type().IsSmi()) {
+    // If the expression is known to be untagged or a smi, then it's definitely
+    // not null, and it can't be an undetectable object.
+ // Jump directly to the false block.
EmitGoto(false_block);
return;
}
int true_block = chunk_->LookupDestination(instr->true_block_id());
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ CompareRoot(reg, nil_value);
- if (instr->kind() == kStrictEquality) {
+
+ __ CompareRoot(reg, Heap::kNullValueRootIndex);
+ if (instr->is_strict()) {
EmitBranch(true_block, false_block, equal);
} else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
- __ CompareRoot(reg, other_nil_value);
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label);
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
@@ -1761,40 +1752,30 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
Handle<String> class_name,
Register input,
- Register temp,
- Register scratch) {
+ Register temp) {
__ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+ __ j(below, is_false);
+ // Map is now in temp.
+ // Functions have class 'Function'.
+ __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
- __ j(equal, is_true);
+ __ j(above_equal, is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movq(scratch, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subb(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpb(scratch,
- Immediate(static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)));
- __ j(above, is_false);
+ __ j(above_equal, is_false);
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -1823,7 +1804,6 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1832,7 +1812,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+ EmitClassOfTest(true_label, false_label, class_name, input, temp);
EmitBranch(true_block, false_block, equal);
}
@@ -1873,8 +1853,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
virtual void Generate() {
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+
Label* map_check() { return &map_check_; }
+
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@@ -2015,7 +1996,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
__ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
__ movq(result, Operand(result, 0));
}
- if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (instr->hydrogen()->check_hole_value()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
}
@@ -2035,39 +2016,25 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register object = ToRegister(instr->TempAt(0));
- Register address = ToRegister(instr->TempAt(1));
Register value = ToRegister(instr->InputAt(0));
- ASSERT(!value.is(object));
- Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
-
- __ movq(address, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
-
+ Register temp = ToRegister(instr->TempAt(0));
+ ASSERT(!value.is(temp));
+ bool check_hole = instr->hydrogen()->check_hole_value();
+ if (!check_hole && value.is(rax)) {
+ __ store_rax(instr->hydrogen()->cell().location(),
+ RelocInfo::GLOBAL_PROPERTY_CELL);
+ return;
+ }
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(Operand(address, 0), Heap::kTheHoleValueRootIndex);
+ __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
+ if (check_hole) {
+ __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
}
-
- // Store the value.
- __ movq(Operand(address, 0), value);
-
- Label smi_store;
- __ JumpIfSmi(value, &smi_store, Label::kNear);
-
- int offset = JSGlobalPropertyCell::kValueOffset - kHeapObjectTag;
- __ lea(object, Operand(address, -offset));
- // Cells are always in the remembered set.
- __ RecordWrite(object,
- address,
- value,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ bind(&smi_store);
+ __ movq(Operand(temp, 0), value);
}
@@ -2097,7 +2064,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->needs_write_barrier()) {
int offset = Context::SlotOffset(instr->slot_index());
Register scratch = ToRegister(instr->TempAt(0));
- __ RecordWriteContextSlot(context, offset, value, scratch, kSaveFPRegs);
+ __ RecordWrite(context, offset, value, scratch);
}
}
@@ -2316,15 +2283,17 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- FAST_DOUBLE_ELEMENTS,
- offset);
- __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ FAST_DOUBLE_ELEMENTS,
+ offset);
+ __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
+ }
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2396,7 +2365,6 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -2713,7 +2681,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@@ -3010,7 +2977,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+ CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ Drop(1);
@@ -3066,7 +3033,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object, offset, value, temp, kSaveFPRegs);
+ __ RecordWrite(object, offset, value, temp);
}
} else {
Register temp = ToRegister(instr->TempAt(0));
@@ -3075,7 +3042,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
- __ RecordWriteField(temp, offset, value, object, kSaveFPRegs);
+ __ RecordWrite(temp, offset, value, object);
}
}
}
@@ -3123,7 +3090,6 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3159,13 +3125,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- // conversion, so it deopts in that case.
- if (instr->hydrogen()->ValueNeedsSmiCheck()) {
- Condition cc = masm()->CheckSmi(value);
- DeoptimizeIf(NegateCondition(cc), instr->environment());
- }
-
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3187,7 +3146,7 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
key,
times_pointer_size,
FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value, kSaveFPRegs);
+ __ RecordWrite(elements, key, value);
}
}
@@ -3237,7 +3196,6 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -3358,7 +3316,6 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -3435,7 +3392,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -3531,6 +3487,16 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+ LTaggedToI* instr_;
+};
+
+
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number;
Register input_reg = ToRegister(instr->InputAt(0));
@@ -3579,16 +3545,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -4025,12 +3981,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_symbol())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
- __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
- final_branch_condition = equal;
+ __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
+ final_branch_condition = above_equal;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@@ -4156,7 +4109,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
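The hunks above drop the instr() accessor and instruction-index bookkeeping from every deferred-code class and hoist DeferredTaggedToI out of its enclosing method. A minimal stand-alone sketch of the deferred-code pattern they touch, with simplified class bodies that are illustrative rather than V8 code:

    #include <cstddef>
    #include <vector>

    class DeferredCode {                        // simplified stand-in for LDeferredCode
     public:
      virtual ~DeferredCode() {}
      virtual void Generate() = 0;              // emits the out-of-line slow path
    };

    class DeferredTaggedToI : public DeferredCode {
     public:
      explicit DeferredTaggedToI(int instr_id) : instr_id_(instr_id) {}
      virtual void Generate() { /* emit the slow path for instr_id_ here */ }
     private:
      int instr_id_;                            // stands in for the LTaggedToI pointer
    };

    class CodeGenerator {                       // simplified stand-in for LCodeGen
     public:
      void AddDeferredCode(DeferredCode* code) { deferred_.push_back(code); }
      void GenerateDeferredCode() {             // run once all fast paths are emitted
        for (std::size_t i = 0; i < deferred_.size(); ++i) deferred_[i]->Generate();
      }
     private:
      std::vector<DeferredCode*> deferred_;
    };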
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 106d7bb2e5..8cb4cece96 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -140,8 +140,7 @@ class LCodeGen BASE_EMBEDDED {
Label* if_false,
Handle<String> class_name,
Register input,
- Register temporary,
- Register scratch);
+ Register temporary);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
@@ -346,20 +345,16 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
+ : codegen_(codegen), external_exit_(NULL) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -370,7 +365,6 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
- int instruction_index_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index a67a59320e..5fc56462bb 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -214,11 +214,10 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
+ stream->Add(is_strict() ? " === null" : " == null");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@@ -707,9 +706,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ instr->set_environment(CreateEnvironment(hydrogen_env));
return instr;
}
@@ -992,13 +989,10 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1008,6 +1002,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
argument_count_,
value_count,
outer);
+ int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1016,7 +1011,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument((*argument_index_accumulator)++);
+ op = new LArgument(argument_index++);
} else {
op = UseAny(value);
}
@@ -1441,10 +1436,10 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
- return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
+ LOperand* temp = instr->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
}
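The revert from LIsNilAndBranch to LIsNullAndBranch narrows the branch back to null comparisons; the temp register is still needed only in the sloppy ("== null") case, because that form must also accept undefined and undetectable objects. A stand-alone sketch of the comparison being lowered (the enum is illustrative, not V8 code):

    enum ValueKind { kNull, kUndefined, kUndetectableObject, kOtherValue };

    // Strict form is a single compare against null; the sloppy form also
    // matches undefined and undetectable host objects, hence the extra work.
    bool CompareToNull(ValueKind v, bool is_strict) {
      if (v == kNull) return true;
      if (is_strict) return false;
      return v == kUndefined || v == kUndetectableObject;
    }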
@@ -1494,7 +1489,6 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
- TempRegister(),
TempRegister());
}
@@ -1722,7 +1716,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->RequiresHoleCheck()
+ return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@@ -1737,10 +1731,8 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LStoreGlobalCell* result =
- new LStoreGlobalCell(UseTempRegister(instr->value()),
- TempRegister(),
- TempRegister());
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
+ return instr->check_hole_value() ? AssignEnvironment(result) : result;
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index d43a86a9a5..d169bf6dfc 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -107,7 +107,7 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
+ V(IsNullAndBranch) \
V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -609,18 +609,17 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
+class LIsNullAndBranch: public LControlInstruction<1, 1> {
public:
- LIsNilAndBranch(LOperand* value, LOperand* temp) {
+ LIsNullAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
+ bool is_strict() const { return hydrogen()->is_strict(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -706,12 +705,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
@@ -1199,12 +1197,11 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
public:
- explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
@@ -2149,8 +2146,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
void VisitInstruction(HInstruction* current);
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 7fe6d5821e..9cfc9b6588 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -44,7 +44,6 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
- has_frame_(false),
root_array_available_(true) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -197,47 +196,28 @@ void MacroAssembler::CompareRoot(const Operand& with,
}
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then) {
- if (FLAG_debug_code) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
- // Load store buffer top.
- LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
- // Store pointer to buffer.
- movq(Operand(scratch, 0), addr);
- // Increment buffer top.
- addq(scratch, Immediate(kPointerSize));
- // Write back new top of buffer.
- StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
- // Call stub on end of buffer.
- Label done;
- // Check for end of buffer.
- testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
- if (and_then == kReturnAtEnd) {
- Label buffer_overflowed;
- j(not_equal, &buffer_overflowed, Label::kNear);
- ret(0);
- bind(&buffer_overflowed);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- j(equal, &done, Label::kNear);
- }
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
- CallStub(&store_buffer_overflow);
- if (and_then == kReturnAtEnd) {
- ret(0);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- bind(&done);
+void MacroAssembler::RecordWriteHelper(Register object,
+ Register addr,
+ Register scratch) {
+ if (emit_debug_code()) {
+ // Check that the object is not in new space.
+ Label not_in_new_space;
+ InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
+ Abort("new-space object passed to RecordWriteHelper");
+ bind(&not_in_new_space);
}
+
+ // Compute the page start address from the heap object pointer, and reuse
+ // the 'object' register for it.
+ and_(object, Immediate(~Page::kPageAlignmentMask));
+
+ // Compute the number of the region covering addr. See Page::GetRegionNumberForAddress
+ // method for more details.
+ shrl(addr, Immediate(Page::kRegionSizeLog2));
+ andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
+
+ // Set dirty mark for region.
+ bts(Operand(object, Page::kDirtyFlagOffset), addr);
}
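The restored RecordWriteHelper is the pre-incremental-marking barrier: it marks the fixed-size region of the page covering the written slot as dirty. The same address arithmetic in stand-alone form, with assumed values standing in for Page::kPageAlignmentMask and Page::kRegionSizeLog2:

    #include <cstdint>

    const uintptr_t kPageSize = uintptr_t(1) << 20;   // assumed page size
    const uintptr_t kPageAlignmentMask = kPageSize - 1;
    const int kRegionSizeLog2 = 13;                   // assumed 8 KB regions

    // Page start and region index that the helper computes for a slot address.
    void RegionForSlot(uintptr_t slot, uintptr_t* page_start, uintptr_t* region) {
      *page_start = slot & ~kPageAlignmentMask;              // and_(object, ~mask)
      *region = (slot >> kRegionSizeLog2) &                  // shrl(addr, log2)
                (kPageAlignmentMask >> kRegionSizeLog2);     // andl(addr, mask >> log2)
      // The helper then sets bit *region in the page's dirty bitmap via bts.
    }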
@@ -245,7 +225,7 @@ void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
Label* branch,
- Label::Distance distance) {
+ Label::Distance near_jump) {
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
@@ -260,7 +240,7 @@ void MacroAssembler::InNewSpace(Register object,
}
movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
cmpq(scratch, kScratchRegister);
- j(cc, branch, distance);
+ j(cc, branch, near_jump);
} else {
ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
intptr_t new_space_start =
@@ -272,128 +252,127 @@ void MacroAssembler::InNewSpace(Register object,
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
}
and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
- j(cc, branch, distance);
+ j(cc, branch, near_jump);
}
}
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWrite(Register object,
+ int offset,
+ Register value,
+ Register index) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
- ASSERT(!value.is(rsi) && !dst.is(rsi));
+ ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
// First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
+ // catch stores of smis and stores into the young generation.
Label done;
+ JumpIfSmi(value, &done);
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done);
- }
-
- // Although the object register is tagged, the offset is relative to the start
- // of the object, so the offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
-
- lea(dst, FieldOperand(object, offset));
- if (emit_debug_code()) {
- Label ok;
- testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
+ RecordWriteNonSmi(object, offset, value, index);
bind(&done);
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors. This clobbering repeats the
+ // clobbering done inside RecordWriteNonSmi but it's necessary to
+ // avoid having the fast case for smis leave the registers
+ // unchanged.
if (emit_debug_code()) {
+ movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ Register value) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
- ASSERT(!value.is(rsi) && !address.is(rsi));
+ ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
- if (emit_debug_code()) {
- AbortIfSmi(object);
- }
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+ JumpIfSmi(value, &done);
- if (remembered_set_action == OMIT_REMEMBERED_SET &&
- !FLAG_incremental_marking) {
- return;
- }
+ InNewSpace(object, value, equal, &done);
- if (FLAG_debug_code) {
- Label ok;
- cmpq(value, Operand(address, 0));
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
+ RecordWriteHelper(object, address, value);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
+}
- // First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
+
+void MacroAssembler::RecordWriteNonSmi(Register object,
+ int offset,
+ Register scratch,
+ Register index) {
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
- // Skip barrier if writing a smi.
- JumpIfSmi(value, &done);
+ if (emit_debug_code()) {
+ Label okay;
+ JumpIfNotSmi(object, &okay, Label::kNear);
+ Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
+ bind(&okay);
+
+ if (offset == 0) {
+ // index must be int32.
+ Register tmp = index.is(rax) ? rbx : rax;
+ push(tmp);
+ movl(tmp, index);
+ cmpq(tmp, index);
+ Check(equal, "Index register for RecordWrite must be untagged int32.");
+ pop(tmp);
+ }
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
+ // Test that the object address is not in the new space. We cannot
+ // update page dirty marks for new space pages.
+ InNewSpace(object, scratch, equal, &done);
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
+ // The offset is relative to a tagged or untagged HeapObject pointer,
+ // so either offset or offset + kHeapObjectTag must be a
+ // multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize) ||
+ IsAligned(offset + kHeapObjectTag, kPointerSize));
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
+ Register dst = index;
+ if (offset != 0) {
+ lea(dst, Operand(object, offset));
+ } else {
+ // array access: calculate the destination address in the same manner as
+ // KeyedStoreIC::GenerateGeneric.
+ lea(dst, FieldOperand(object,
+ index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ }
+ RecordWriteHelper(object, dst, scratch);
bind(&done);
- // Clobber clobbered registers when running with the debug-code flag
+ // Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
-
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (emit_debug_code()) Check(cc, msg);
}
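Both downgraded RecordWrite entry points share the same filter before marking anything: a store of a smi needs no barrier, and neither does a store into an object that itself lives in new space. In plain terms (the booleans stand in for the inline JumpIfSmi and InNewSpace checks; this is a sketch, not a V8 API):

    bool NeedsWriteBarrier(bool value_is_smi, bool object_in_new_space) {
      if (value_is_smi) return false;         // smis carry no heap pointer
      if (object_in_new_space) return false;  // new-space pages keep no dirty marks
      return true;                            // otherwise mark the region covering the slot
    }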
@@ -421,7 +400,7 @@ void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
j(cc, &L, Label::kNear);
Abort(msg);
- // Control will not return here.
+ // will not return here
bind(&L);
}
@@ -469,6 +448,9 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ AllowStubCallsScope allow_scope(this, true);
+
push(rax);
movq(kScratchRegister, p0, RelocInfo::NONE);
push(kScratchRegister);
@@ -476,28 +458,20 @@ void MacroAssembler::Abort(const char* msg) {
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
RelocInfo::NONE);
push(kScratchRegister);
-
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
- } else {
- CallRuntime(Runtime::kAbort, 2);
- }
- // Control will not return here.
+ CallRuntime(Runtime::kAbort, 2);
+ // will not return here
int3();
}
void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
+ ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
MaybeObject* result = stub->TryGetCode();
if (!result->IsFailure()) {
call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
@@ -508,12 +482,13 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
+ ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
MaybeObject* result = stub->TryGetCode();
if (!result->IsFailure()) {
jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
@@ -529,12 +504,6 @@ void MacroAssembler::StubReturn(int argc) {
}
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
addq(rsp, Immediate(num_arguments * kPointerSize));
@@ -571,7 +540,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
Set(rax, function->nargs);
LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1, kSaveFPRegs);
+ CEntryStub ces(1);
+ ces.SaveDoubles();
CallStub(&ces);
}
@@ -825,8 +795,8 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ // Calls are not allowed in some stubs.
+ ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
// Rely on the assertion to check that the number of provided
// arguments match the expected number of arguments. Fake a
@@ -855,57 +825,6 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
}
-static const Register saved_regs[] =
- { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
-static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
-
-
-void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1,
- Register exclusion2,
- Register exclusion3) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- for (int i = 0; i < kNumberOfSavedRegs; i++) {
- Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- push(reg);
- }
- }
- // R12 to r15 are callee save on all platforms.
- if (fp_mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(rsp, i * kDoubleSize), reg);
- }
- }
-}
-
-
-void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1,
- Register exclusion2,
- Register exclusion3) {
- if (fp_mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(rsp, i * kDoubleSize));
- }
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- }
- for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
- Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- pop(reg);
- }
- }
-}
-
-
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
@@ -2648,91 +2567,13 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
- j(below_equal, fail, distance);
+ STATIC_ASSERT(FAST_ELEMENTS == 0);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Immediate(Map::kMaximumBitField2FastElementValue));
j(above, fail, distance);
}
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
- Register maybe_number,
- Register elements,
- Register key,
- XMMRegister xmm_scratch,
- Label* fail) {
- Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
-
- JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
- CheckMap(maybe_number,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
- cmpl(FieldOperand(maybe_number, offset),
- Immediate(kNaNOrInfinityLowerBoundUpper32));
- j(greater_equal, &maybe_nan, Label::kNear);
-
- bind(&not_nan);
- movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
- xmm_scratch);
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- j(greater, &is_nan, Label::kNear);
- cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
- j(zero, &not_nan);
- bind(&is_nan);
- // Convert all NaNs to the same canonical NaN value when they are stored in
- // the double array.
- Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- movq(xmm_scratch, kScratchRegister);
- jmp(&have_double_value, Label::kNear);
-
- bind(&smi_value);
- // Value is a smi. Convert to a double and store.
- // Preserve original value.
- SmiToInteger32(kScratchRegister, maybe_number);
- cvtlsi2sd(xmm_scratch, kScratchRegister);
- movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
- xmm_scratch);
- bind(&done);
-}
-
-
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
@@ -2946,10 +2787,10 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
+ ASSERT(allow_stub_calls());
Set(rax, 0); // No arguments.
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -2975,9 +2816,6 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
Label done;
InvokePrologue(expected,
actual,
@@ -3009,9 +2847,6 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
Label done;
Register dummy = rax;
InvokePrologue(expected,
@@ -3042,9 +2877,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
ASSERT(function.is(rdi));
movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -3064,9 +2896,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
ASSERT(function->is_compiled());
// Get the function and setup the context.
Move(rdi, Handle<JSFunction>(function));
@@ -3930,20 +3759,6 @@ void MacroAssembler::CopyBytes(Register destination,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
- jmp(&entry);
- bind(&loop);
- movq(Operand(start_offset, 0), filler);
- addq(start_offset, Immediate(kPointerSize));
- bind(&entry);
- cmpq(start_offset, end_offset);
- j(less, &loop);
-}
-
-
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
@@ -4043,7 +3858,6 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
- ASSERT(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
@@ -4058,17 +3872,6 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
}
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
-}
-
-
CodePatcher::CodePatcher(byte* address, int size)
: address_(address),
size_(size),
@@ -4089,195 +3892,6 @@ CodePatcher::~CodePatcher() {
ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- movq(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
- }
- if (mask < (1 << kBitsPerByte)) {
- testb(Operand(scratch, MemoryChunk::kFlagsOffset),
- Immediate(static_cast<uint8_t>(mask)));
- } else {
- testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
- }
- j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* on_black,
- Label::Distance on_black_distance) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- // The mask_scratch register contains a 1 at the position of the first bit
- // and a 0 at all other positions, including the position of the second bit.
- movq(rcx, mask_scratch);
- // Make rcx into a mask that covers both marking bits using the operation
- // rcx = mask | (mask << 1).
- lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
- // Note that we are using a 4-byte aligned 8-byte load.
- and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpq(mask_scratch, rcx);
- j(equal, on_black, on_black_distance);
-}
-
-
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(
- Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance) {
- Label is_data_object;
- movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- j(equal, &is_data_object, Label::kNear);
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, not_data_object, not_data_object_distance);
- bind(&is_data_object);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
- movq(bitmap_reg, addr_reg);
- // Sign extended 32 bit immediate.
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- movq(rcx, addr_reg);
- int shift =
- Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
- shrl(rcx, Immediate(shift));
- and_(rcx,
- Immediate((Page::kPageAlignmentMask >> shift) &
- ~(Bitmap::kBytesPerCell - 1)));
-
- addq(bitmap_reg, rcx);
- movq(rcx, addr_reg);
- shrl(rcx, Immediate(kPointerSizeLog2));
- and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
- movl(mask_reg, Immediate(1));
- shl_cl(mask_reg);
-}
-
-
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label done;
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(not_zero, &done, Label::kNear);
-
- if (FLAG_debug_code) {
- // Check for impossible bit pattern.
- Label ok;
- push(mask_scratch);
- // shl. May overflow making the check conservative.
- addq(mask_scratch, mask_scratch);
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = rcx; // Holds map while checking type.
- Register length = rcx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- movq(map, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- j(not_equal, &not_heap_number, Label::kNear);
- movq(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = rcx;
- movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- testb(instance_type, Immediate(kExternalStringTag));
- j(zero, &not_external, Label::kNear);
- movq(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either ASCII or UC16.
- ASSERT(kAsciiStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- addq(length, Immediate(0x04));
- // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
-
- bind(&done);
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
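Everything removed in the last hunk belongs to the incremental-marking barrier that 3.6.4 predates: per-page mark bitmaps in which white is 00, black is 10 and grey is 11, plus helpers that locate the two mark bits for an address. A stand-alone sketch of the lookup GetMarkBits performs, with assumed bitmap geometry:

    #include <cstdint>

    const int kPointerSizeLog2 = 3;        // 64-bit words
    const uint32_t kBitsPerCell = 32;      // assumed bits per bitmap cell

    // Locate the first mark bit for an address within its page's bitmap.
    void MarkBitFor(uintptr_t addr, uintptr_t page_alignment_mask,
                    uintptr_t* cell_index, uint32_t* mask) {
      uintptr_t offset = addr & page_alignment_mask;   // offset inside the page
      uintptr_t bit = offset >> kPointerSizeLog2;      // one bit pair per word
      *cell_index = bit / kBitsPerCell;                // which bitmap cell
      *mask = uint32_t(1) << (bit % kBitsPerCell);     // first mark bit in that cell
      // White objects have both mark bits clear; EnsureNotWhite tests only this
      // first bit because black (10) and grey (11) both have it set.
    }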
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 7e0ba00546..e7eb104c0b 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -29,7 +29,6 @@
#define V8_X64_MACRO_ASSEMBLER_X64_H_
#include "assembler.h"
-#include "frames.h"
#include "v8globals.h"
namespace v8 {
@@ -62,11 +61,6 @@ static const int kRootRegisterBias = 128;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
// Forward declaration.
class JumpTarget;
@@ -78,7 +72,6 @@ struct SmiIndex {
ScaleFactor scale;
};
-
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -141,145 +134,56 @@ class MacroAssembler: public Assembler {
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
- // These functions do not arrange the registers in any particular order so
- // they are not useful for calls that can cause a GC. The caller can
- // exclude up to 3 registers that do not need to be saved and restored.
- void PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
-
-// ---------------------------------------------------------------------------
-// GC Support
-
-
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, not_equal, branch, distance);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, equal, branch, distance);
- }
-
- // Check if an object has the black incremental marking color. Also uses rcx!
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_distance = Label::kFar);
-
- // Detects conservatively whether an object is data-only, i.e. it does not
- // need to be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // Operand(reg, off).
- void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- save_fp,
- remembered_set_action,
- smi_check);
- }
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ // For page containing |object| mark region covering |addr| dirty.
+ // RecordWriteHelper only works if the object is not in new
+ // space.
+ void RecordWriteHelper(Register object,
+ Register addr,
+ Register scratch);
+
+ // Check if object is in new space. The condition cc can be equal or
+ // not_equal. If it is equal a jump will be done if the object is in new
+ // space. The register scratch can be object itself, but it will be clobbered.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch,
+ Label::Distance near_jump = Label::kFar);
- // Notify the garbage collector that we wrote a pointer into a fixed array.
- // |array| is the array being stored into, |value| is the
- // object being stored. |index| is the array index represented as a
- // Smi. All registers are clobbered by the operation RecordWriteArray
- // filters out smis so it does not update the write barrier if the
- // value is a smi.
- void RecordWriteArray(
- Register array,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // For page containing |object| mark region covering |address|
+ // For page containing |object| mark region covering [object+offset]
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. If |offset| is zero, then the |scratch|
+ // register contains the array index into the elements array
+ // represented as an untagged 32-bit integer. All registers are
+ // clobbered by the operation. RecordWrite filters out smis so it
+ // does not update the write barrier if the value is a smi.
+ void RecordWrite(Register object,
+ int offset,
+ Register value,
+ Register scratch);
+
+ // For page containing |object| mark region covering [address]
// dirty. |object| is the object being stored into, |value| is the
- // object being stored. The address and value registers are clobbered by the
+ // object being stored. All registers are clobbered by the
// operation. RecordWrite filters out smis so it does not update
// the write barrier if the value is a smi.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ void RecordWrite(Register object,
+ Register address,
+ Register value);
+
+ // For page containing |object| mark region covering [object+offset] dirty.
+ // The value is known to not be a smi.
+ // object is the object being stored into, value is the object being stored.
+ // If offset is zero, then the scratch register contains the array index into
+ // the elements array represented as an untagged 32-bit integer.
+ // All registers are clobbered by the operation.
+ void RecordWriteNonSmi(Register object,
+ int offset,
+ Register value,
+ Register scratch);
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
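The header comments above now describe the downgraded barrier signatures. For the offset-based form, the call shape is the one GenerateStoreField uses later in this diff; roughly (a non-compilable fragment, with an invented kFooOffset and register choices):

    // Store a tagged pointer into a field, then mark the covering region dirty.
    // RecordWrite filters smis itself and clobbers all of its register inputs.
    __ movq(FieldOperand(receiver_reg, kFooOffset), value_reg);
    __ RecordWrite(receiver_reg, kFooOffset, value_reg, scratch_reg);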
@@ -288,6 +192,15 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
+ // ---------------------------------------------------------------------------
+ // Activation frames
+
+ void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+ void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+ void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+ void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
@@ -847,28 +760,6 @@ class MacroAssembler: public Assembler {
Label* fail,
Label::Distance distance = Label::kFar);
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiOnlyElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
- // Note that key must not be smi-tagged.
- void StoreNumberToDoubleElements(Register maybe_number,
- Register elements,
- Register key,
- XMMRegister xmm_scratch,
- Label* fail);
-
// Check if the map of an object is equal to a specified map and
// branch to label if not. Skip the smi check if not required
// (object is known to be a heap object)
@@ -1228,13 +1119,6 @@ class MacroAssembler: public Assembler {
int min_length = 0,
Register scratch = kScratchRegister);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1267,18 +1151,11 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
@@ -1288,7 +1165,6 @@ class MacroAssembler: public Assembler {
bool generating_stub_;
bool allow_stub_calls_;
- bool has_frame_;
bool root_array_available_;
// Returns a register holding the smi value. The register MUST NOT be
@@ -1312,6 +1188,10 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper = NullCallWrapper(),
CallKind call_kind = CALL_AS_METHOD);
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
void EnterExitFramePrologue(bool save_rax);
// Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
@@ -1338,20 +1218,6 @@ class MacroAssembler: public Assembler {
Register scratch,
bool gc_allowed);
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch,
- Label::Distance distance = Label::kFar);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Uses rcx as scratch and leaves addr_reg
- // unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 55fabc0036..a782bd7052 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -193,7 +193,7 @@ void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
+ __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -205,7 +205,7 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
+ __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -431,14 +431,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ LoadAddress(rcx, ExternalReference::isolate_address());
#endif
-
- { // NOLINT: Can't find a way to open this scope without confusing the
- // linter.
- AllowExternalCallThatCantCauseGC scope(&masm_);
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
- __ CallCFunction(compare, num_arguments);
- }
+ ExternalReference compare =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
+ __ CallCFunction(compare, num_arguments);
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_.CodeObject());
@@ -711,12 +706,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// registers we need.
// Entry code:
__ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // code is generated.
- FrameScope scope(&masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
+ // Start new stack frame.
__ push(rbp);
__ movq(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index c4b2672f60..76d2555798 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -645,7 +645,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
scratch1, scratch2, scratch3, name,
miss_label);
- FrameScope scope(masm, StackFrame::INTERNAL);
+ __ EnterInternalFrame();
// Save the name_ register across the call.
__ push(name_);
@@ -662,8 +662,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Restore the name_ register.
__ pop(name_);
-
- // Leave the internal frame.
+ __ LeaveInternalFrame();
}
void LoadWithInterceptor(MacroAssembler* masm,
@@ -671,21 +670,19 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register holder,
JSObject* holder_obj,
Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- // Leave the internal frame.
- }
+ __ EnterInternalFrame();
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ __ LeaveInternalFrame();
__ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
__ j(not_equal, interceptor_succeeded);
@@ -784,8 +781,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, rax);
- __ RecordWriteField(
- receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs);
+ __ RecordWrite(receiver_reg, offset, name_reg, scratch);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -796,8 +792,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movq(name_reg, rax);
- __ RecordWriteField(
- scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ __ RecordWrite(scratch, offset, name_reg, receiver_reg);
}
// Return the value (register rax).
@@ -1144,43 +1139,41 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ push(receiver);
- }
- __ push(holder_reg);
- __ push(name_reg);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(equal, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ ret(0);
+ __ EnterInternalFrame();
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ push(receiver);
+ }
+ __ push(holder_reg);
+ __ push(name_reg);
+
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method.)
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ j(equal, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ ret(0);
- // Leave the internal frame.
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
}
+ __ LeaveInternalFrame();
+
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
if (interceptor_holder != lookup->holder()) {
@@ -1428,7 +1421,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier;
+ Label exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1442,40 +1435,30 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ cmpl(rax, rcx);
__ j(greater, &attempt_to_grow_elements);
- // Check if value is a smi.
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
- __ JumpIfNotSmi(rcx, &with_write_barrier);
-
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Push the element.
+ __ movq(rcx, Operand(rsp, argc * kPointerSize));
__ lea(rdx, FieldOperand(rbx,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ movq(Operand(rdx, 0), rcx);
+ // Check if value is a smi.
__ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
- __ bind(&with_write_barrier);
+ __ JumpIfNotSmi(rcx, &with_write_barrier);
- __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(rdi, &call_builtin);
+ __ bind(&exit);
+ __ ret((argc + 1) * kPointerSize);
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+ __ bind(&with_write_barrier);
- // Push the element.
- __ lea(rdx, FieldOperand(rbx,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ movq(Operand(rdx, 0), rcx);
+ __ InNewSpace(rbx, rcx, equal, &exit);
- __ RecordWrite(
- rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ RecordWriteHelper(rbx, rdx, rcx);
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
@@ -1483,15 +1466,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ jmp(&call_builtin);
}
- __ movq(rdi, Operand(rsp, argc * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(rdi, &no_fast_elements_check);
- __ movq(rsi, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(rsi, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
@@ -1515,22 +1489,16 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// We fit and could grow elements.
__ Store(new_space_allocation_top, rcx);
+ __ movq(rcx, Operand(rsp, argc * kPointerSize));
// Push the argument...
- __ movq(Operand(rdx, 0), rdi);
+ __ movq(Operand(rdx, 0), rcx);
// ... and fill the rest with holes.
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
}
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it so we may have to
- // tell the incremental marker to rescan the object that we just grew. We
- // don't need to worry about the holes because they are in old space and
- // already marked black.
- __ RecordWrite(rbx, rdx, rdi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
// Restore receiver to rdx as finish sequence assumes it's here.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1542,6 +1510,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Integer32ToSmi(rax, rax);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+ // Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
}
@@ -2494,36 +2463,19 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
Handle<Map>(object->map()));
__ j(not_equal, &miss);
- // Compute the cell operand to use.
- __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
- Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
-
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
- __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
+ __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+ __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ Heap::kTheHoleValueRootIndex);
__ j(equal, &miss);
// Store the value in the cell.
- __ movq(cell_operand, rax);
- Label done;
- __ JumpIfSmi(rax, &done);
-
- __ movq(rcx, rax);
- __ lea(rdx, cell_operand);
- // Cells are always in the remembered set.
- __ RecordWrite(rbx, // Object.
- rdx, // Address.
- rcx, // Value.
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
+ __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
// Return the value (register rax).
- __ bind(&done);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
@@ -2603,10 +2555,9 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
}
-MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
MapList* receiver_maps,
- CodeList* handler_stubs,
- MapList* transitioned_maps) {
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -2614,25 +2565,18 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
// -- rsp[0] : return address
// -----------------------------------
Label miss;
- __ JumpIfSmi(rdx, &miss, Label::kNear);
+ __ JumpIfSmi(rdx, &miss);
- __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+ Register map_reg = rbx;
+ __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
- for (int i = 0; i < receiver_count; ++i) {
+ for (int current = 0; current < receiver_count; ++current) {
// Check map and tail call if there's a match
- Handle<Map> map(receiver_maps->at(i));
- __ Cmp(rdi, map);
- if (transitioned_maps->at(i) == NULL) {
- __ j(equal, Handle<Code>(handler_stubs->at(i)), RelocInfo::CODE_TARGET);
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- __ movq(rbx,
- Handle<Map>(transitioned_maps->at(i)),
- RelocInfo::EMBEDDED_OBJECT);
- __ jmp(Handle<Code>(handler_stubs->at(i)), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
+ Handle<Map> map(receiver_maps->at(current));
+ __ Cmp(map_reg, map);
+ __ j(equal,
+ Handle<Code>(handler_ics->at(current)),
+ RelocInfo::CODE_TARGET);
}
__ bind(&miss);
@@ -3068,7 +3012,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic(
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
MapList* receiver_maps,
CodeList* handler_ics) {
// ----------- S t a t e -------------
@@ -3492,7 +3436,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
break;
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3560,7 +3503,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3692,17 +3634,15 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -3725,22 +3665,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ j(above_equal, &miss_force_generic);
}
- if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(rax, &transition_elements_kind);
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- } else {
- // Do the store and update the write barrier.
- ASSERT(elements_kind == FAST_ELEMENTS);
- __ SmiToInteger32(rcx, rcx);
- __ lea(rcx,
- FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
- // Make sure to preserve the value in register rax.
- __ movq(rdx, rax);
- __ RecordWrite(rdi, rcx, rdx, kDontSaveFPRegs);
- }
+ // Do the store and update the write barrier. Make sure to preserve
+ // the value in register rax.
+ __ movq(rdx, rax);
+ __ SmiToInteger32(rcx, rcx);
+ __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ __ RecordWrite(rdi, 0, rdx, rcx);
// Done.
__ ret(0);
@@ -3750,10 +3681,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
}
@@ -3766,7 +3693,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, smi_value, is_nan, maybe_nan;
+ Label have_double_value, not_nan;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -3787,9 +3715,50 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ j(above_equal, &miss_force_generic);
// Handle smi values specially
+ __ JumpIfSmi(rax, &smi_value, Label::kNear);
+
+ __ CheckMap(rax,
+ masm->isolate()->factory()->heap_number_map(),
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ // Double value, canonicalize NaN.
+ uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+ __ cmpl(FieldOperand(rax, offset),
+ Immediate(kNaNOrInfinityLowerBoundUpper32));
+ __ j(greater_equal, &maybe_nan, Label::kNear);
+
+ __ bind(&not_nan);
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ bind(&have_double_value);
+ __ SmiToInteger32(rcx, rcx);
+ __ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
+ xmm0);
+ __ ret(0);
+
+ __ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ __ j(greater, &is_nan, Label::kNear);
+ __ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
+ __ j(zero, &not_nan);
+ __ bind(&is_nan);
+ // Convert all NaNs to the same canonical NaN value when they are stored in
+ // the double array.
+ __ Set(kScratchRegister, BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ __ movq(xmm0, kScratchRegister);
+ __ jmp(&have_double_value, Label::kNear);
+
+ __ bind(&smi_value);
+ // Value is a smi. Convert to a double and store.
+ // Preserve original value.
+ __ SmiToInteger32(rdx, rax);
+ __ push(rdx);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(rdx);
__ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &transition_elements_kind);
+ __ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
@@ -3797,12 +3766,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic_force_generic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- // Restore smi-tagging of rcx.
- __ Integer32ToSmi(rcx, rcx);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index efcbad7169..5d0cab3e98 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -91,8 +91,7 @@
'test-threads.cc',
'test-unbound-queue.cc',
'test-utils.cc',
- 'test-version.cc',
- 'test-weakmaps.cc'
+ 'test-version.cc'
],
'conditions': [
['v8_target_arch=="ia32"', {
@@ -135,12 +134,6 @@
'sources': [
'test-platform-win32.cc',
],
- 'msvs_settings': {
- 'VCCLCompilerTool': {
- # MSVS wants this for gay-{precision,shortest}.cc.
- 'AdditionalOptions': ['/bigobj'],
- },
- },
}],
['component=="shared_library"', {
# cctest can't be built against a shared library, so we need to
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 759f69f339..5122da5ae3 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -33,28 +33,14 @@ test-api/Bug*: FAIL
# BUG(382): Weird test. Can't guarantee that it never times out.
test-api/ApplyInterruption: PASS || TIMEOUT
-# BUG(484): This test which we thought was originally corrected in r5236
-# is re-appearing. Disabled until bug in test is fixed. This only fails
-# when snapshot is on, so I am marking it PASS || FAIL
-test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
-
# These tests always fail. They are here to test test.py. If
# they don't fail then test.py has failed.
test-serialize/TestThatAlwaysFails: FAIL
test-serialize/DependentTestThatAlwaysFails: FAIL
-# TODO(gc): Temporarily disabled in the GC branch.
-test-log/EquivalenceOfLoggingAndTraversal: PASS || FAIL
-
-# BUG(1261): Flakey test.
-test-profile-generator/RecordStackTraceAtStartProfiling: PASS || FAIL
-
# We do not yet shrink weak maps after they have been emptied by the GC
test-weakmaps/Shrinking: FAIL
-# NewGC: BUG(1717)
-test-api/OutOfMemoryNested: PASS || TIMEOUT
-
##############################################################################
[ $arch == arm ]
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index b1900f9ed3..d95536d2d5 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -241,7 +241,7 @@ static v8::Handle<Value> CheckAccessorArgsCorrect(Local<String> name,
ApiTestFuzzer::Fuzz();
CHECK(info.This() == info.Holder());
CHECK(info.Data()->Equals(v8::String::New("data")));
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
CHECK(info.This() == info.Holder());
CHECK(info.Data()->Equals(v8::String::New("data")));
return v8::Integer::New(17);
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index 899c9021ff..97671923d9 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -72,29 +72,11 @@ static MaybeObject* AllocateAfterFailures() {
}
CHECK(!heap->AllocateRawAsciiString(100, TENURED)->IsFailure());
- // Old pointer space.
- OldSpace* old_pointer_space = heap->old_pointer_space();
- static const int kOldPointerSpaceFillerLength = 10000;
- static const int kOldPointerSpaceFillerSize = FixedArray::SizeFor(
- kOldPointerSpaceFillerLength);
- while (old_pointer_space->Available() > kOldPointerSpaceFillerSize) {
- CHECK(!heap->AllocateFixedArray(kOldPointerSpaceFillerLength, TENURED)->
- IsFailure());
- }
- CHECK(!heap->AllocateFixedArray(kOldPointerSpaceFillerLength, TENURED)->
- IsFailure());
-
// Large object space.
- static const int kLargeObjectSpaceFillerLength = 300000;
- static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
- kLargeObjectSpaceFillerLength);
- ASSERT(kLargeObjectSpaceFillerSize > heap->MaxObjectSizeInPagedSpace());
- while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
- CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
- IsFailure());
+ while (!heap->OldGenerationAllocationLimitReached()) {
+ CHECK(!heap->AllocateFixedArray(10000, TENURED)->IsFailure());
}
- CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
- IsFailure());
+ CHECK(!heap->AllocateFixedArray(10000, TENURED)->IsFailure());
// Map space.
MapSpace* map_space = heap->map_space();
@@ -193,16 +175,16 @@ unsigned int Pseudorandom() {
// Plain old data class. Represents a block of allocated memory.
class Block {
public:
- Block(Address base_arg, int size_arg)
+ Block(void* base_arg, int size_arg)
: base(base_arg), size(size_arg) {}
- Address base;
+ void *base;
int size;
};
TEST(CodeRange) {
- const int code_range_size = 32*MB;
+ const int code_range_size = 16*MB;
OS::Setup();
Isolate::Current()->InitializeLoggingAndCounters();
CodeRange* code_range = new CodeRange(Isolate::Current());
@@ -214,13 +196,11 @@ TEST(CodeRange) {
while (total_allocated < 5 * code_range_size) {
if (current_allocated < code_range_size / 10) {
// Allocate a block.
- // Geometrically distributed sizes, greater than Page::kMaxHeapObjectSize.
- // TODO(gc): instead of using 3 use some constant based on code_range_size
- // kMaxHeapObjectSize.
- size_t requested = (Page::kMaxHeapObjectSize << (Pseudorandom() % 3)) +
+ // Geometrically distributed sizes, greater than Page::kPageSize.
+ size_t requested = (Page::kPageSize << (Pseudorandom() % 6)) +
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
- Address base = code_range->AllocateRawMemory(requested, &allocated);
+ void* base = code_range->AllocateRawMemory(requested, &allocated);
CHECK(base != NULL);
blocks.Add(Block(base, static_cast<int>(allocated)));
current_allocated += static_cast<int>(allocated);
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 167c4cd155..c1c8aae592 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -80,11 +80,6 @@ static void ExpectString(const char* code, const char* expected) {
CHECK_EQ(expected, *ascii);
}
-static void ExpectInt32(const char* code, int expected) {
- Local<Value> result = CompileRun(code);
- CHECK(result->IsInt32());
- CHECK_EQ(expected, result->Int32Value());
-}
static void ExpectBoolean(const char* code, bool expected) {
Local<Value> result = CompileRun(code);
@@ -398,11 +393,11 @@ THREADED_TEST(ScriptUsingStringResource) {
CHECK(source->IsExternal());
CHECK_EQ(resource,
static_cast<TestResource*>(source->GetExternalStringResource()));
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, dispose_count);
}
v8::internal::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllAvailableGarbage();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, dispose_count);
}
@@ -420,11 +415,11 @@ THREADED_TEST(ScriptUsingAsciiStringResource) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, dispose_count);
}
i::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllAvailableGarbage();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, dispose_count);
}
@@ -446,12 +441,11 @@ THREADED_TEST(ScriptMakingExternalString) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, dispose_count);
}
i::Isolate::Current()->compilation_cache()->Clear();
- // TODO(1608): This should use kAbortIncrementalMarking.
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, dispose_count);
}
@@ -473,12 +467,11 @@ THREADED_TEST(ScriptMakingExternalAsciiString) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, dispose_count);
}
i::Isolate::Current()->compilation_cache()->Clear();
- // TODO(1608): This should use kAbortIncrementalMarking.
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, dispose_count);
}
@@ -579,8 +572,8 @@ THREADED_TEST(UsingExternalString) {
i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
CHECK(isymbol->IsSymbol());
}
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
}
@@ -597,8 +590,8 @@ THREADED_TEST(UsingExternalAsciiString) {
i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
CHECK(isymbol->IsSymbol());
}
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
}
@@ -679,11 +672,11 @@ TEST(ExternalStringWithDisposeHandling) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- HEAP->CollectAllAvailableGarbage();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
}
i::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllAvailableGarbage();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
@@ -700,11 +693,11 @@ TEST(ExternalStringWithDisposeHandling) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- HEAP->CollectAllAvailableGarbage();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
}
i::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllAvailableGarbage();
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_count);
}
@@ -751,8 +744,8 @@ THREADED_TEST(StringConcat) {
CHECK_EQ(68, value->Int32Value());
}
i::Isolate::Current()->compilation_cache()->Clear();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
}
@@ -1301,197 +1294,6 @@ static v8::Handle<Value> EchoNamedProperty(Local<String> name,
return name;
}
-// Helper functions for Interceptor/Accessor interaction tests
-
-Handle<Value> SimpleAccessorGetter(Local<String> name,
- const AccessorInfo& info) {
- Handle<Object> self = info.This();
- return self->Get(String::Concat(v8_str("accessor_"), name));
-}
-
-void SimpleAccessorSetter(Local<String> name, Local<Value> value,
- const AccessorInfo& info) {
- Handle<Object> self = info.This();
- self->Set(String::Concat(v8_str("accessor_"), name), value);
-}
-
-Handle<Value> EmptyInterceptorGetter(Local<String> name,
- const AccessorInfo& info) {
- return Handle<Value>();
-}
-
-Handle<Value> EmptyInterceptorSetter(Local<String> name,
- Local<Value> value,
- const AccessorInfo& info) {
- return Handle<Value>();
-}
-
-Handle<Value> InterceptorGetter(Local<String> name,
- const AccessorInfo& info) {
- // Intercept names that start with 'interceptor_'.
- String::AsciiValue ascii(name);
- char* name_str = *ascii;
- char prefix[] = "interceptor_";
- int i;
- for (i = 0; name_str[i] && prefix[i]; ++i) {
- if (name_str[i] != prefix[i]) return Handle<Value>();
- }
- Handle<Object> self = info.This();
- return self->GetHiddenValue(v8_str(name_str + i));
-}
-
-Handle<Value> InterceptorSetter(Local<String> name,
- Local<Value> value,
- const AccessorInfo& info) {
- // Intercept accesses that set certain integer values.
- if (value->IsInt32() && value->Int32Value() < 10000) {
- Handle<Object> self = info.This();
- self->SetHiddenValue(name, value);
- return value;
- }
- return Handle<Value>();
-}
-
-void AddAccessor(Handle<FunctionTemplate> templ,
- Handle<String> name,
- v8::AccessorGetter getter,
- v8::AccessorSetter setter) {
- templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
-}
-
-void AddInterceptor(Handle<FunctionTemplate> templ,
- v8::NamedPropertyGetter getter,
- v8::NamedPropertySetter setter) {
- templ->InstanceTemplate()->SetNamedPropertyHandler(getter, setter);
-}
-
-THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
- v8::HandleScope scope;
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
- child->Inherit(parent);
- AddAccessor(parent, v8_str("age"),
- SimpleAccessorGetter, SimpleAccessorSetter);
- AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "child.age = 10;");
- ExpectBoolean("child.hasOwnProperty('age')", false);
- ExpectInt32("child.age", 10);
- ExpectInt32("child.accessor_age", 10);
-}
-
-THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
- v8::HandleScope scope;
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
- child->Inherit(parent);
- AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "var parent = child.__proto__;"
- "Object.defineProperty(parent, 'age', "
- " {get: function(){ return this.accessor_age; }, "
- " set: function(v){ this.accessor_age = v; }, "
- " enumerable: true, configurable: true});"
- "child.age = 10;");
- ExpectBoolean("child.hasOwnProperty('age')", false);
- ExpectInt32("child.age", 10);
- ExpectInt32("child.accessor_age", 10);
-}
-
-THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
- v8::HandleScope scope;
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
- child->Inherit(parent);
- AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "var parent = child.__proto__;"
- "parent.name = 'Alice';");
- ExpectBoolean("child.hasOwnProperty('name')", false);
- ExpectString("child.name", "Alice");
- CompileRun("child.name = 'Bob';");
- ExpectString("child.name", "Bob");
- ExpectBoolean("child.hasOwnProperty('name')", true);
- ExpectString("parent.name", "Alice");
-}
-
-THREADED_TEST(SwitchFromInterceptorToAccessor) {
- v8::HandleScope scope;
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
- child->Inherit(parent);
- AddAccessor(parent, v8_str("age"),
- SimpleAccessorGetter, SimpleAccessorSetter);
- AddInterceptor(child, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "function setAge(i){ child.age = i; };"
- "for(var i = 0; i <= 10000; i++) setAge(i);");
- // All i < 10000 go to the interceptor.
- ExpectInt32("child.interceptor_age", 9999);
- // The last i goes to the accessor.
- ExpectInt32("child.accessor_age", 10000);
-}
-
-THREADED_TEST(SwitchFromAccessorToInterceptor) {
- v8::HandleScope scope;
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
- child->Inherit(parent);
- AddAccessor(parent, v8_str("age"),
- SimpleAccessorGetter, SimpleAccessorSetter);
- AddInterceptor(child, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "function setAge(i){ child.age = i; };"
- "for(var i = 20000; i >= 9999; i--) setAge(i);");
- // All i >= 10000 go to the accessor.
- ExpectInt32("child.accessor_age", 10000);
- // The last i goes to the interceptor.
- ExpectInt32("child.interceptor_age", 9999);
-}
-
-THREADED_TEST(SwitchFromInterceptorToProperty) {
- v8::HandleScope scope;
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
- child->Inherit(parent);
- AddInterceptor(child, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "function setAge(i){ child.age = i; };"
- "for(var i = 0; i <= 10000; i++) setAge(i);");
- // All i < 10000 go to the interceptor.
- ExpectInt32("child.interceptor_age", 9999);
- // The last i goes to child's own property.
- ExpectInt32("child.age", 10000);
-}
-
-THREADED_TEST(SwitchFromPropertyToInterceptor) {
- v8::HandleScope scope;
- Handle<FunctionTemplate> parent = FunctionTemplate::New();
- Handle<FunctionTemplate> child = FunctionTemplate::New();
- child->Inherit(parent);
- AddInterceptor(child, InterceptorGetter, InterceptorSetter);
- LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- CompileRun("var child = new Child;"
- "function setAge(i){ child.age = i; };"
- "for(var i = 20000; i >= 9999; i--) setAge(i);");
- // All i >= 10000 go to child's own property.
- ExpectInt32("child.age", 10000);
- // The last i goes to the interceptor.
- ExpectInt32("child.interceptor_age", 9999);
-}
THREADED_TEST(NamedPropertyHandlerGetter) {
echo_named_call_count = 0;
@@ -1864,12 +1666,12 @@ THREADED_TEST(InternalFieldsNativePointers) {
// Check reading and writing aligned pointers.
obj->SetPointerInInternalField(0, aligned);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
// Check reading and writing unaligned pointers.
obj->SetPointerInInternalField(0, unaligned);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
delete[] data;
@@ -1895,19 +1697,19 @@ THREADED_TEST(InternalFieldsNativePointersAndExternal) {
CHECK_EQ(1, static_cast<int>(reinterpret_cast<uintptr_t>(unaligned) & 0x1));
obj->SetPointerInInternalField(0, aligned);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(aligned, v8::External::Unwrap(obj->GetInternalField(0)));
obj->SetPointerInInternalField(0, unaligned);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(unaligned, v8::External::Unwrap(obj->GetInternalField(0)));
obj->SetInternalField(0, v8::External::Wrap(aligned));
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
obj->SetInternalField(0, v8::External::Wrap(unaligned));
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
delete[] data;
@@ -1920,7 +1722,7 @@ THREADED_TEST(IdentityHash) {
// Ensure that the test starts with a fresh heap to test whether the hash
// code is based on the address.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
Local<v8::Object> obj = v8::Object::New();
int hash = obj->GetIdentityHash();
int hash1 = obj->GetIdentityHash();
@@ -1930,7 +1732,7 @@ THREADED_TEST(IdentityHash) {
// objects should not be assigned the same hash code. If the test below fails
// the random number generator should be evaluated.
CHECK_NE(hash, hash2);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
int hash3 = v8::Object::New()->GetIdentityHash();
// Make sure that the identity hash is not based on the initial address of
// the object alone. If the test below fails the random number generator
@@ -1967,7 +1769,7 @@ THREADED_TEST(HiddenProperties) {
v8::Local<v8::String> empty = v8_str("");
v8::Local<v8::String> prop_name = v8_str("prop_name");
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
// Make sure delete of a non-existent hidden value works
CHECK(obj->DeleteHiddenValue(key));
@@ -1977,7 +1779,7 @@ THREADED_TEST(HiddenProperties) {
CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
// Make sure we do not find the hidden property.
CHECK(!obj->Has(empty));
@@ -1988,7 +1790,7 @@ THREADED_TEST(HiddenProperties) {
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
CHECK_EQ(2003, obj->Get(empty)->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
// Add another property and delete it afterwards to force the object in
// slow case.
@@ -1999,7 +1801,7 @@ THREADED_TEST(HiddenProperties) {
CHECK(obj->Delete(prop_name));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK(obj->DeleteHiddenValue(key));
CHECK(obj->GetHiddenValue(key).IsEmpty());
@@ -2106,30 +1908,19 @@ THREADED_TEST(GlobalHandle) {
}
-class WeakCallCounter {
- public:
- explicit WeakCallCounter(int id) : id_(id), number_of_weak_calls_(0) { }
- int id() { return id_; }
- void increment() { number_of_weak_calls_++; }
- int NumberOfWeakCalls() { return number_of_weak_calls_; }
- private:
- int id_;
- int number_of_weak_calls_;
-};
-
-
+static int NumberOfWeakCalls = 0;
static void WeakPointerCallback(Persistent<Value> handle, void* id) {
- WeakCallCounter* counter = reinterpret_cast<WeakCallCounter*>(id);
- CHECK_EQ(1234, counter->id());
- counter->increment();
+ CHECK_EQ(reinterpret_cast<void*>(1234), id);
+ NumberOfWeakCalls++;
handle.Dispose();
}
-
THREADED_TEST(ApiObjectGroups) {
HandleScope scope;
LocalContext env;
+ NumberOfWeakCalls = 0;
+
Persistent<Object> g1s1;
Persistent<Object> g1s2;
Persistent<Object> g1c1;
@@ -2137,23 +1928,21 @@ THREADED_TEST(ApiObjectGroups) {
Persistent<Object> g2s2;
Persistent<Object> g2c1;
- WeakCallCounter counter(1234);
-
{
HandleScope scope;
g1s1 = Persistent<Object>::New(Object::New());
g1s2 = Persistent<Object>::New(Object::New());
g1c1 = Persistent<Object>::New(Object::New());
- g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+ g1s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+ g1c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
g2s1 = Persistent<Object>::New(Object::New());
g2s2 = Persistent<Object>::New(Object::New());
g2c1 = Persistent<Object>::New(Object::New());
- g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+ g2s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+ g2c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
}
Persistent<Object> root = Persistent<Object>::New(g1s1); // make a root.
@@ -2172,15 +1961,14 @@ THREADED_TEST(ApiObjectGroups) {
V8::AddObjectGroup(g2_objects, 2);
V8::AddImplicitReferences(g2s2, g2_children, 1);
}
- // Do a single full GC. Use kMakeHeapIterableMask to ensure that
- // incremental garbage collection is stopped.
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ // Do a full GC
+ HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
// All objects should be alive.
- CHECK_EQ(0, counter.NumberOfWeakCalls());
+ CHECK_EQ(0, NumberOfWeakCalls);
// Weaken the root.
- root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ root.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
// But make children strong roots---all the objects (except for children)
// should be collectable now.
g1c1.ClearWeak();
@@ -2198,17 +1986,17 @@ THREADED_TEST(ApiObjectGroups) {
V8::AddImplicitReferences(g2s2, g2_children, 1);
}
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
// All objects should be gone. 5 global handles in total.
- CHECK_EQ(5, counter.NumberOfWeakCalls());
+ CHECK_EQ(5, NumberOfWeakCalls);
// And now make children weak again and collect them.
- g1c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+ g2c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
- CHECK_EQ(7, counter.NumberOfWeakCalls());
+ HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+ CHECK_EQ(7, NumberOfWeakCalls);
}
@@ -2216,7 +2004,7 @@ THREADED_TEST(ApiObjectGroupsCycle) {
HandleScope scope;
LocalContext env;
- WeakCallCounter counter(1234);
+ NumberOfWeakCalls = 0;
Persistent<Object> g1s1;
Persistent<Object> g1s2;
@@ -2229,18 +2017,18 @@ THREADED_TEST(ApiObjectGroupsCycle) {
HandleScope scope;
g1s1 = Persistent<Object>::New(Object::New());
g1s2 = Persistent<Object>::New(Object::New());
- g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+ g1s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
g2s1 = Persistent<Object>::New(Object::New());
g2s2 = Persistent<Object>::New(Object::New());
- g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+ g2s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
g3s1 = Persistent<Object>::New(Object::New());
g3s2 = Persistent<Object>::New(Object::New());
- g3s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g3s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g3s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+ g3s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
}
Persistent<Object> root = Persistent<Object>::New(g1s1); // make a root.
@@ -2262,14 +2050,14 @@ THREADED_TEST(ApiObjectGroupsCycle) {
V8::AddObjectGroup(g3_objects, 2);
V8::AddImplicitReferences(g3s1, g3_children, 1);
}
- // Do a single full GC
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ // Do a full GC
+ HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
// All objects should be alive.
- CHECK_EQ(0, counter.NumberOfWeakCalls());
+ CHECK_EQ(0, NumberOfWeakCalls);
// Weaken the root.
- root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ root.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
// Groups are deleted, rebuild groups.
{
@@ -2287,10 +2075,10 @@ THREADED_TEST(ApiObjectGroupsCycle) {
V8::AddImplicitReferences(g3s1, g3_children, 1);
}
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
// All objects should be gone. 7 global handles in total.
- CHECK_EQ(7, counter.NumberOfWeakCalls());
+ CHECK_EQ(7, NumberOfWeakCalls);
}
@@ -4517,47 +4305,6 @@ THREADED_TEST(SimpleExtensions) {
}
-static const char* kEmbeddedExtensionSource =
- "function Ret54321(){return 54321;}~~@@$"
- "$%% THIS IS A SERIES OF NON-NULL-TERMINATED STRINGS.";
-static const int kEmbeddedExtensionSourceValidLen = 34;
-
-
-THREADED_TEST(ExtensionMissingSourceLength) {
- v8::HandleScope handle_scope;
- v8::RegisterExtension(new Extension("srclentest_fail",
- kEmbeddedExtensionSource));
- const char* extension_names[] = { "srclentest_fail" };
- v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
- CHECK_EQ(0, *context);
-}
-
-
-THREADED_TEST(ExtensionWithSourceLength) {
- for (int source_len = kEmbeddedExtensionSourceValidLen - 1;
- source_len <= kEmbeddedExtensionSourceValidLen + 1; ++source_len) {
- v8::HandleScope handle_scope;
- i::ScopedVector<char> extension_name(32);
- i::OS::SNPrintF(extension_name, "ext #%d", source_len);
- v8::RegisterExtension(new Extension(extension_name.start(),
- kEmbeddedExtensionSource, 0, 0,
- source_len));
- const char* extension_names[1] = { extension_name.start() };
- v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
- if (source_len == kEmbeddedExtensionSourceValidLen) {
- Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("Ret54321()"))->Run();
- CHECK_EQ(v8::Integer::New(54321), result);
- } else {
- // Anything but exactly the right length should fail to compile.
- CHECK_EQ(0, *context);
- }
- }
-}
-
-
static const char* kEvalExtensionSource1 =
"function UseEval1() {"
" var x = 42;"
@@ -5058,7 +4805,7 @@ static void InvokeScavenge() {
static void InvokeMarkSweep() {
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
}
@@ -5151,7 +4898,7 @@ static v8::Handle<Value> ArgumentsTestCallback(const v8::Arguments& args) {
CHECK_EQ(v8::Integer::New(3), args[2]);
CHECK_EQ(v8::Undefined(), args[3]);
v8::HandleScope scope;
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
return v8::Undefined();
}
@@ -8136,7 +7883,7 @@ static v8::Handle<Value> InterceptorHasOwnPropertyGetterGC(
Local<String> name,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
return v8::Handle<Value>();
}
@@ -8866,7 +8613,7 @@ static v8::Handle<Value> InterceptorCallICFastApi(Local<String> name,
int* call_count = reinterpret_cast<int*>(v8::External::Unwrap(info.Data()));
++(*call_count);
if ((*call_count) % 20 == 0) {
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
}
return v8::Handle<Value>();
}
@@ -10217,7 +9964,6 @@ THREADED_TEST(LockUnlockLock) {
static int GetGlobalObjectsCount() {
- i::Isolate::Current()->heap()->EnsureHeapIsIterable();
int count = 0;
i::HeapIterator it;
for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
@@ -10232,8 +9978,9 @@ static void CheckSurvivingGlobalObjectsCount(int expected) {
// the first garbage collection but some of the maps have already
// been marked at that point. Therefore some of the maps are not
// collected until the second garbage collection.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ HEAP->global_context_map();
+ HEAP->CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
int count = GetGlobalObjectsCount();
#ifdef DEBUG
if (count != expected) HEAP->TracePathToGlobal();
@@ -10302,7 +10049,7 @@ THREADED_TEST(NewPersistentHandleFromWeakCallback) {
// weak callback of the first handle would be able to 'reallocate' it.
handle1.MakeWeak(NULL, NewPersistentHandleCallback);
handle2.Dispose();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
}
@@ -10310,7 +10057,7 @@ v8::Persistent<v8::Object> to_be_disposed;
void DisposeAndForceGcCallback(v8::Persistent<v8::Value> handle, void*) {
to_be_disposed.Dispose();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
handle.Dispose();
}
@@ -10326,7 +10073,7 @@ THREADED_TEST(DoNotUseDeletedNodesInSecondLevelGc) {
}
handle1.MakeWeak(NULL, DisposeAndForceGcCallback);
to_be_disposed = handle2;
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
}
void DisposingCallback(v8::Persistent<v8::Value> handle, void*) {
@@ -10352,7 +10099,7 @@ THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) {
}
handle2.MakeWeak(NULL, DisposingCallback);
handle3.MakeWeak(NULL, HandleCreatingCallback);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
}
@@ -11168,7 +10915,7 @@ class RegExpInterruptTest {
{
v8::Locker lock;
// TODO(lrn): Perhaps create some garbage before collecting.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
gc_count_++;
}
i::OS::Sleep(1);
@@ -11290,7 +11037,7 @@ class ApplyInterruptTest {
while (gc_during_apply_ < kRequiredGCs) {
{
v8::Locker lock;
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
gc_count_++;
}
i::OS::Sleep(1);
@@ -12006,15 +11753,13 @@ THREADED_TEST(PixelArray) {
i::Handle<i::ExternalPixelArray> pixels =
i::Handle<i::ExternalPixelArray>::cast(
FACTORY->NewExternalArray(kElementCount,
- v8::kExternalPixelArray,
- pixel_data));
- // Force GC to trigger verification.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ v8::kExternalPixelArray,
+ pixel_data));
+ HEAP->CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
pixels->set(i, i % 256);
}
- // Force GC to trigger verification.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
CHECK_EQ(i % 256, pixels->get_scalar(i));
CHECK_EQ(i % 256, pixel_data[i]);
@@ -12490,13 +12235,11 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
i::Handle<ExternalArrayClass> array =
i::Handle<ExternalArrayClass>::cast(
FACTORY->NewExternalArray(kElementCount, array_type, array_data));
- // Force GC to trigger verification.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
array->set(i, static_cast<ElementType>(i));
}
- // Force GC to trigger verification.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
CHECK_EQ(static_cast<int64_t>(i),
static_cast<int64_t>(array->get_scalar(i)));
@@ -12614,8 +12357,7 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
" }"
"}"
"sum;");
- // Force GC to trigger verification.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false); // Force GC to trigger verification.
CHECK_EQ(28, result->Int32Value());
// Make sure out-of-range loads do not throw.
@@ -13595,7 +13337,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_simple);
other_context->Exit();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -13617,7 +13359,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_eval);
other_context->Exit();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -13644,7 +13386,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_exception);
other_context->Exit();
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -13862,26 +13604,26 @@ TEST(GCCallbacks) {
v8::V8::AddGCEpilogueCallback(EpilogueCallback);
CHECK_EQ(0, prologue_call_count);
CHECK_EQ(0, epilogue_call_count);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(1, prologue_call_count);
CHECK_EQ(1, epilogue_call_count);
v8::V8::AddGCPrologueCallback(PrologueCallbackSecond);
v8::V8::AddGCEpilogueCallback(EpilogueCallbackSecond);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(1, prologue_call_count_second);
CHECK_EQ(1, epilogue_call_count_second);
v8::V8::RemoveGCPrologueCallback(PrologueCallback);
v8::V8::RemoveGCEpilogueCallback(EpilogueCallback);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
v8::V8::RemoveGCPrologueCallback(PrologueCallbackSecond);
v8::V8::RemoveGCEpilogueCallback(EpilogueCallbackSecond);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
@@ -14098,7 +13840,7 @@ THREADED_TEST(TwoByteStringInAsciiCons) {
void FailedAccessCheckCallbackGC(Local<v8::Object> target,
v8::AccessType type,
Local<v8::Value> data) {
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
}
@@ -14672,7 +14414,7 @@ TEST(DontDeleteCellLoadIC) {
"})()",
"ReferenceError: cell is not defined");
CompileRun("cell = \"new_second\";");
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
ExpectString("readCell()", "new_second");
ExpectString("readCell()", "new_second");
}
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index cdab8f7cb6..839b7f562e 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -93,15 +93,15 @@ TEST(AssemblerIa321) {
Label L, C;
__ mov(edx, Operand(esp, 4));
- __ xor_(eax, eax); // clear eax
+ __ xor_(eax, Operand(eax)); // clear eax
__ jmp(&C);
__ bind(&L);
- __ add(eax, edx);
- __ sub(edx, Immediate(1));
+ __ add(eax, Operand(edx));
+ __ sub(Operand(edx), Immediate(1));
__ bind(&C);
- __ test(edx, edx);
+ __ test(edx, Operand(edx));
__ j(not_zero, &L);
__ ret(0);
@@ -135,11 +135,11 @@ TEST(AssemblerIa322) {
__ jmp(&C);
__ bind(&L);
- __ imul(eax, edx);
- __ sub(edx, Immediate(1));
+ __ imul(eax, Operand(edx));
+ __ sub(Operand(edx), Immediate(1));
__ bind(&C);
- __ test(edx, edx);
+ __ test(edx, Operand(edx));
__ j(not_zero, &L);
__ ret(0);
@@ -275,10 +275,10 @@ TEST(AssemblerIa326) {
__ subsd(xmm0, xmm1);
__ divsd(xmm0, xmm1);
// Copy xmm0 to st(0) using eight bytes of stack.
- __ sub(esp, Immediate(8));
+ __ sub(Operand(esp), Immediate(8));
__ movdbl(Operand(esp, 0), xmm0);
__ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(8));
+ __ add(Operand(esp), Immediate(8));
__ ret(0);
CodeDesc desc;
@@ -314,12 +314,12 @@ TEST(AssemblerIa328) {
v8::internal::byte buffer[256];
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
__ mov(eax, Operand(esp, 4));
- __ cvtsi2sd(xmm0, eax);
+ __ cvtsi2sd(xmm0, Operand(eax));
// Copy xmm0 to st(0) using eight bytes of stack.
- __ sub(esp, Immediate(8));
+ __ sub(Operand(esp), Immediate(8));
__ movdbl(Operand(esp, 0), xmm0);
__ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(8));
+ __ add(Operand(esp), Immediate(8));
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index f5c01e6162..2d9b01204a 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -305,11 +305,10 @@ TEST(C2JSFrames) {
Handle<Object> fun1(fun1_object->ToObjectChecked());
CHECK(fun1->IsJSFunction());
- Handle<Object> argv[] = { FACTORY->LookupAsciiSymbol("hello") };
- Execution::Call(Handle<JSFunction>::cast(fun1),
- global,
- ARRAY_SIZE(argv),
- argv,
+ Object** argv[1] = {
+ Handle<Object>::cast(FACTORY->LookupAsciiSymbol("hello")).location()
+ };
+ Execution::Call(Handle<JSFunction>::cast(fun1), global, 1, argv,
&has_pending_exception);
CHECK(!has_pending_exception);
}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index de60d4999d..45da6dc0bd 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -425,8 +425,8 @@ void CheckDebuggerUnloaded(bool check_functions) {
CHECK_EQ(NULL, Isolate::Current()->debug()->debug_info_list_);
// Collect garbage to ensure weak handles are cleared.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ HEAP->CollectAllGarbage(false);
+ HEAP->CollectAllGarbage(false);
// Iterate the heap and check that there are no debugger related objects left.
HeapIterator iterator;
@@ -944,7 +944,7 @@ static void DebugEventBreakPointCollectGarbage(
HEAP->CollectGarbage(v8::internal::NEW_SPACE);
} else {
// Mark sweep compact.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
}
}
}
@@ -1417,7 +1417,8 @@ TEST(GCDuringBreakPointProcessing) {
// Call the function three times with different garbage collections in between
// and make sure that the break point survives.
static void CallAndGC(v8::Local<v8::Object> recv,
- v8::Local<v8::Function> f) {
+ v8::Local<v8::Function> f,
+ bool force_compaction) {
break_point_hit_count = 0;
for (int i = 0; i < 3; i++) {
@@ -1431,15 +1432,14 @@ static void CallAndGC(v8::Local<v8::Object> recv,
CHECK_EQ(2 + i * 3, break_point_hit_count);
// Mark sweep (and perhaps compact) and call function.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(force_compaction);
f->Call(recv, 0, NULL);
CHECK_EQ(3 + i * 3, break_point_hit_count);
}
}
-// Test that a break point can be set at a return store location.
-TEST(BreakPointSurviveGC) {
+static void TestBreakPointSurviveGC(bool force_compaction) {
break_point_hit_count = 0;
v8::HandleScope scope;
DebugLocalContext env;
@@ -1455,7 +1455,7 @@ TEST(BreakPointSurviveGC) {
foo = CompileFunction(&env, "function foo(){bar=0;}", "foo");
SetBreakPoint(foo, 0);
}
- CallAndGC(env->Global(), foo);
+ CallAndGC(env->Global(), foo, force_compaction);
// Test IC load break point with garbage collection.
{
@@ -1464,7 +1464,7 @@ TEST(BreakPointSurviveGC) {
foo = CompileFunction(&env, "bar=1;function foo(){var x=bar;}", "foo");
SetBreakPoint(foo, 0);
}
- CallAndGC(env->Global(), foo);
+ CallAndGC(env->Global(), foo, force_compaction);
// Test IC call break point with garbage collection.
{
@@ -1475,7 +1475,7 @@ TEST(BreakPointSurviveGC) {
"foo");
SetBreakPoint(foo, 0);
}
- CallAndGC(env->Global(), foo);
+ CallAndGC(env->Global(), foo, force_compaction);
// Test return break point with garbage collection.
{
@@ -1484,7 +1484,7 @@ TEST(BreakPointSurviveGC) {
foo = CompileFunction(&env, "function foo(){}", "foo");
SetBreakPoint(foo, 0);
}
- CallAndGC(env->Global(), foo);
+ CallAndGC(env->Global(), foo, force_compaction);
// Test non IC break point with garbage collection.
{
@@ -1493,7 +1493,7 @@ TEST(BreakPointSurviveGC) {
foo = CompileFunction(&env, "function foo(){var bar=0;}", "foo");
SetBreakPoint(foo, 0);
}
- CallAndGC(env->Global(), foo);
+ CallAndGC(env->Global(), foo, force_compaction);
v8::Debug::SetDebugEventListener(NULL);
@@ -1501,6 +1501,13 @@ TEST(BreakPointSurviveGC) {
}
+// Test that a break point can be set at a return store location.
+TEST(BreakPointSurviveGC) {
+ TestBreakPointSurviveGC(false);
+ TestBreakPointSurviveGC(true);
+}
+
+
// Test that break points can be set using the global Debug object.
TEST(BreakPointThroughJavaScript) {
break_point_hit_count = 0;
@@ -2252,7 +2259,7 @@ TEST(ScriptBreakPointLineTopLevel) {
}
f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
SetScriptBreakPointByNameFromJS("test.html", 3, -1);
@@ -6465,7 +6472,7 @@ TEST(ScriptCollectedEvent) {
// Do garbage collection to ensure that only the script in this test will be
// collected afterwards.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
script_collected_count = 0;
v8::Debug::SetDebugEventListener(DebugEventScriptCollectedEvent,
@@ -6477,7 +6484,7 @@ TEST(ScriptCollectedEvent) {
// Do garbage collection to collect the script above which is no longer
// referenced.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(2, script_collected_count);
@@ -6513,7 +6520,7 @@ TEST(ScriptCollectedEventContext) {
// Do garbage collection to ensure that only the script in this test will be
// collected afterwards.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
{
@@ -6524,7 +6531,7 @@ TEST(ScriptCollectedEventContext) {
// Do garbage collection to collect the script above which is no longer
// referenced.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK_EQ(2, script_collected_message_count);
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index aa733c70bc..619839185e 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -232,7 +232,7 @@ TEST(Unknown) {
context.Check("const x; x",
1, // access
2, // declaration + initialization
- 1, // declaration
+ 2, // declaration + initialization
EXPECT_RESULT, Undefined());
}
@@ -240,7 +240,7 @@ TEST(Unknown) {
context.Check("const x = 0; x",
1, // access
2, // declaration + initialization
- 1, // declaration
+ 2, // declaration + initialization
EXPECT_RESULT, Undefined()); // SB 0 - BUG 1213579
}
}
@@ -285,18 +285,18 @@ TEST(Present) {
{ PresentPropertyContext context;
context.Check("const x; x",
- 1, // access
- 1, // initialization
+ 0,
+ 0,
1, // (re-)declaration
- EXPECT_RESULT, Undefined());
+ EXPECT_EXCEPTION); // x has already been declared!
}
{ PresentPropertyContext context;
context.Check("const x = 0; x",
- 1, // access
- 1, // initialization
+ 0,
+ 0,
1, // (re-)declaration
- EXPECT_RESULT, Number::New(0));
+ EXPECT_EXCEPTION); // x has already been declared!
}
}
@@ -341,7 +341,7 @@ TEST(Absent) {
context.Check("const x; x",
1, // access
2, // declaration + initialization
- 1, // declaration
+ 2, // declaration + initialization
EXPECT_RESULT, Undefined());
}
@@ -349,7 +349,7 @@ TEST(Absent) {
context.Check("const x = 0; x",
1, // access
2, // declaration + initialization
- 1, // declaration
+ 2, // declaration + initialization
EXPECT_RESULT, Undefined()); // SB 0 - BUG 1213579
}
@@ -429,20 +429,18 @@ TEST(Appearing) {
{ AppearingPropertyContext context;
context.Check("const x; x",
- 1, // access
- 2, // declaration + initialization
+ 0,
1, // declaration
- EXPECT_RESULT, Undefined());
+ 2, // declaration + initialization
+ EXPECT_EXCEPTION); // x has already been declared!
}
{ AppearingPropertyContext context;
context.Check("const x = 0; x",
- 1, // access
- 2, // declaration + initialization
+ 0,
1, // declaration
- EXPECT_RESULT, Undefined());
- // Result is undefined because declaration succeeded but
- // initialization to 0 failed (due to context behavior).
+ 2, // declaration + initialization
+ EXPECT_EXCEPTION); // x has already been declared!
}
}
@@ -498,9 +496,9 @@ TEST(Reappearing) {
{ ReappearingPropertyContext context;
context.Check("const x; var x = 0",
0,
- 3, // const declaration+initialization, var initialization
- 3, // 2 x declaration + var initialization
- EXPECT_RESULT, Undefined());
+ 2, // var declaration + const initialization
+ 4, // 2 x declaration + 2 x initialization
+ EXPECT_EXCEPTION); // x has already been declared!
}
}
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 1e38e4ea72..9f7d0bb6e0 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -63,9 +63,9 @@ TEST(DisasmIa320) {
// Short immediate instructions
__ adc(eax, 12345678);
- __ add(eax, Immediate(12345678));
+ __ add(Operand(eax), Immediate(12345678));
__ or_(eax, 12345678);
- __ sub(eax, Immediate(12345678));
+ __ sub(Operand(eax), Immediate(12345678));
__ xor_(eax, 12345678);
__ and_(eax, 12345678);
Handle<FixedArray> foo = FACTORY->NewFixedArray(10, TENURED);
@@ -75,7 +75,7 @@ TEST(DisasmIa320) {
__ mov(ebx, Operand(esp, ecx, times_2, 0)); // [esp+ecx*4]
// ---- All instructions that I can think of
- __ add(edx, ebx);
+ __ add(edx, Operand(ebx));
__ add(edx, Operand(12, RelocInfo::NONE));
__ add(edx, Operand(ebx, 0));
__ add(edx, Operand(ebx, 16));
@@ -89,7 +89,7 @@ TEST(DisasmIa320) {
__ add(Operand(ebp, ecx, times_4, 12), Immediate(12));
__ nop();
- __ add(ebx, Immediate(12));
+ __ add(Operand(ebx), Immediate(12));
__ nop();
__ adc(ecx, 12);
__ adc(ecx, 1000);
@@ -116,16 +116,16 @@ TEST(DisasmIa320) {
CpuFeatures::Scope fscope(RDTSC);
__ rdtsc();
}
- __ movsx_b(edx, ecx);
- __ movsx_w(edx, ecx);
- __ movzx_b(edx, ecx);
- __ movzx_w(edx, ecx);
+ __ movsx_b(edx, Operand(ecx));
+ __ movsx_w(edx, Operand(ecx));
+ __ movzx_b(edx, Operand(ecx));
+ __ movzx_w(edx, Operand(ecx));
__ nop();
- __ imul(edx, ecx);
- __ shld(edx, ecx);
- __ shrd(edx, ecx);
- __ bts(edx, ecx);
+ __ imul(edx, Operand(ecx));
+ __ shld(edx, Operand(ecx));
+ __ shrd(edx, Operand(ecx));
+ __ bts(Operand(edx), ecx);
__ bts(Operand(ebx, ecx, times_4, 0), ecx);
__ nop();
__ pushad();
@@ -146,9 +146,9 @@ TEST(DisasmIa320) {
__ nop();
__ add(edx, Operand(esp, 16));
- __ add(edx, ecx);
- __ mov_b(edx, ecx);
- __ mov_b(ecx, 6);
+ __ add(edx, Operand(ecx));
+ __ mov_b(edx, Operand(ecx));
+ __ mov_b(Operand(ecx), 6);
__ mov_b(Operand(ebx, ecx, times_4, 10000), 6);
__ mov_b(Operand(esp, 16), edx);
__ mov_w(edx, Operand(esp, 16));
@@ -216,20 +216,22 @@ TEST(DisasmIa320) {
__ adc(edx, 12345);
- __ add(ebx, Immediate(12));
+ __ add(Operand(ebx), Immediate(12));
__ add(Operand(edx, ecx, times_4, 10000), Immediate(12));
__ and_(ebx, 12345);
__ cmp(ebx, 12345);
- __ cmp(ebx, Immediate(12));
+ __ cmp(Operand(ebx), Immediate(12));
__ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12));
- __ cmpb(eax, 100);
__ or_(ebx, 12345);
- __ sub(ebx, Immediate(12));
+ __ sub(Operand(ebx), Immediate(12));
__ sub(Operand(edx, ecx, times_4, 10000), Immediate(12));
+ __ subb(Operand(edx, ecx, times_4, 10000), 100);
+ __ subb(Operand(eax), 100);
+ __ subb(eax, Operand(edx, ecx, times_4, 10000));
__ xor_(ebx, 12345);
@@ -242,7 +244,7 @@ TEST(DisasmIa320) {
__ stos();
__ sub(edx, Operand(ebx, ecx, times_4, 10000));
- __ sub(edx, ebx);
+ __ sub(edx, Operand(ebx));
__ test(edx, Immediate(12345));
__ test(edx, Operand(ebx, ecx, times_8, 10000));
@@ -444,8 +446,8 @@ TEST(DisasmIa320) {
{
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
- __ pextrd(eax, xmm0, 1);
- __ pinsrd(xmm1, eax, 0);
+ __ pextrd(Operand(eax), xmm0, 1);
+ __ pinsrd(xmm1, Operand(eax), 0);
}
}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index d695d7438f..6c2afd47f7 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -294,7 +294,7 @@ TEST(HeapEntryIdsAndGC) {
const v8::HeapSnapshot* snapshot1 =
v8::HeapProfiler::TakeSnapshot(v8_str("s1"));
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true); // Enforce compaction.
const v8::HeapSnapshot* snapshot2 =
v8::HeapProfiler::TakeSnapshot(v8_str("s2"));
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 8ed5bf7668..11b8813063 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
#include <stdlib.h>
@@ -672,8 +672,7 @@ TEST(JSArray) {
// Set array length to 0.
ok = array->SetElementsLength(Smi::FromInt(0))->ToObjectChecked();
CHECK_EQ(Smi::FromInt(0), array->length());
- // Must be in fast mode.
- CHECK(array->HasFastTypeElements());
+ CHECK(array->HasFastElements()); // Must be in fast mode.
// array[length] = name.
ok = array->SetElement(0, *name, kNonStrictMode, true)->ToObjectChecked();
@@ -839,6 +838,49 @@ TEST(Iteration) {
}
+TEST(LargeObjectSpaceContains) {
+ InitializeVM();
+
+ HEAP->CollectGarbage(NEW_SPACE);
+
+ Address current_top = HEAP->new_space()->top();
+ Page* page = Page::FromAddress(current_top);
+ Address current_page = page->address();
+ Address next_page = current_page + Page::kPageSize;
+ int bytes_to_page = static_cast<int>(next_page - current_top);
+ if (bytes_to_page <= FixedArray::kHeaderSize) {
+ // Alas, need to cross another page to be able to
+ // put the desired value.
+ next_page += Page::kPageSize;
+ bytes_to_page = static_cast<int>(next_page - current_top);
+ }
+ CHECK(bytes_to_page > FixedArray::kHeaderSize);
+
+ intptr_t* flags_ptr = &Page::FromAddress(next_page)->flags_;
+ Address flags_addr = reinterpret_cast<Address>(flags_ptr);
+
+ int bytes_to_allocate =
+ static_cast<int>(flags_addr - current_top) + kPointerSize;
+
+ int n_elements = (bytes_to_allocate - FixedArray::kHeaderSize) /
+ kPointerSize;
+ CHECK_EQ(bytes_to_allocate, FixedArray::SizeFor(n_elements));
+ FixedArray* array = FixedArray::cast(
+ HEAP->AllocateFixedArray(n_elements)->ToObjectChecked());
+
+ int index = n_elements - 1;
+ CHECK_EQ(flags_ptr,
+ HeapObject::RawField(array, FixedArray::OffsetOfElementAt(index)));
+ array->set(index, Smi::FromInt(0));
+ // This could have turned the next page into LargeObjectPage:
+ // CHECK(Page::FromAddress(next_page)->IsLargeObjectPage());
+
+ HeapObject* addr = HeapObject::FromAddress(next_page + 2 * kPointerSize);
+ CHECK(HEAP->new_space()->Contains(addr));
+ CHECK(!HEAP->lo_space()->Contains(addr));
+}
+
+
TEST(EmptyHandleEscapeFrom) {
InitializeVM();
@@ -865,7 +907,8 @@ TEST(Regression39128) {
InitializeVM();
// Increase the chance of 'bump-the-pointer' allocation in old space.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ bool force_compaction = true;
+ HEAP->CollectAllGarbage(force_compaction);
v8::HandleScope scope;
@@ -932,6 +975,12 @@ TEST(Regression39128) {
return;
}
CHECK(HEAP->old_pointer_space()->Contains(clone->address()));
+
+ // Step 5: verify validity of region dirty marks.
+ Address clone_addr = clone->address();
+ Page* page = Page::FromAddress(clone_addr);
+ // Check that region covering inobject property 1 is marked dirty.
+ CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize)));
}
@@ -961,18 +1010,17 @@ TEST(TestCodeFlushing) {
Handle<JSFunction> function(JSFunction::cast(func_value));
CHECK(function->shared()->is_compiled());
- // TODO(1609) Currently incremental marker does not support code flushing.
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
CHECK(function->shared()->is_compiled());
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
// foo should no longer be in the compilation cache
CHECK(!function->shared()->is_compiled() || function->IsOptimized());
@@ -1061,7 +1109,7 @@ TEST(TestInternalWeakLists) {
}
// Mark compact handles the weak references.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
// Get rid of f3 and f5 in the same way.
@@ -1070,21 +1118,21 @@ TEST(TestInternalWeakLists) {
HEAP->PerformScavenge();
CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
}
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
CompileRun("f5=null");
for (int j = 0; j < 10; j++) {
HEAP->PerformScavenge();
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
}
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[i]));
ctx[i]->Exit();
}
// Force compilation cache cleanup.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
// Dispose the global contexts one by one.
for (int i = 0; i < kNumTestContexts; i++) {
@@ -1098,7 +1146,7 @@ TEST(TestInternalWeakLists) {
}
// Mark compact handles the weak references.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
CHECK_EQ(kNumTestContexts - i - 1, CountGlobalContexts());
}
@@ -1113,7 +1161,7 @@ static int CountGlobalContextsWithGC(int n) {
Handle<Object> object(HEAP->global_contexts_list());
while (!object->IsUndefined()) {
count++;
- if (count == n) HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ if (count == n) HEAP->CollectAllGarbage(true);
object =
Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK));
}
@@ -1132,7 +1180,7 @@ static int CountOptimizedUserFunctionsWithGC(v8::Handle<v8::Context> context,
while (object->IsJSFunction() &&
!Handle<JSFunction>::cast(object)->IsBuiltin()) {
count++;
- if (count == n) HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ if (count == n) HEAP->CollectAllGarbage(true);
object = Handle<Object>(
Object::cast(JSFunction::cast(*object)->next_function_link()));
}
@@ -1192,84 +1240,90 @@ TEST(TestInternalWeakListsTraverseWithGC) {
TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
InitializeVM();
- HEAP->EnsureHeapIsIterable();
intptr_t size_of_objects_1 = HEAP->SizeOfObjects();
- HeapIterator iterator;
+ HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
intptr_t size_of_objects_2 = 0;
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
size_of_objects_2 += obj->Size();
}
- // Delta must be within 5% of the larger result.
- // TODO(gc): Tighten this up by distinguishing between byte
- // arrays that are real and those that merely mark free space
- // on the heap.
+ // Delta must be within 1% of the larger result.
if (size_of_objects_1 > size_of_objects_2) {
intptr_t delta = size_of_objects_1 - size_of_objects_2;
PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
"Iterator: %" V8_PTR_PREFIX "d, "
"delta: %" V8_PTR_PREFIX "d\n",
size_of_objects_1, size_of_objects_2, delta);
- CHECK_GT(size_of_objects_1 / 20, delta);
+ CHECK_GT(size_of_objects_1 / 100, delta);
} else {
intptr_t delta = size_of_objects_2 - size_of_objects_1;
PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
"Iterator: %" V8_PTR_PREFIX "d, "
"delta: %" V8_PTR_PREFIX "d\n",
size_of_objects_1, size_of_objects_2, delta);
- CHECK_GT(size_of_objects_2 / 20, delta);
+ CHECK_GT(size_of_objects_2 / 100, delta);
}
}
-TEST(GrowAndShrinkNewSpace) {
- InitializeVM();
- NewSpace* new_space = HEAP->new_space();
-
- // Explicitly growing should double the space capacity.
- intptr_t old_capacity, new_capacity;
- old_capacity = new_space->Capacity();
- new_space->Grow();
- new_capacity = new_space->Capacity();
- CHECK(2 * old_capacity == new_capacity);
-
- // Fill up new space to the point that it is completely full. Make sure
- // that the scavenger does not undo the filling.
- old_capacity = new_space->Capacity();
- {
- v8::HandleScope scope;
- AlwaysAllocateScope always_allocate;
- intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
- intptr_t number_of_fillers = (available / FixedArray::SizeFor(1000)) - 10;
- for (intptr_t i = 0; i < number_of_fillers; i++) {
- CHECK(HEAP->InNewSpace(*FACTORY->NewFixedArray(1000, NOT_TENURED)));
+class HeapIteratorTestHelper {
+ public:
+ HeapIteratorTestHelper(Object* a, Object* b)
+ : a_(a), b_(b), a_found_(false), b_found_(false) {}
+ bool a_found() { return a_found_; }
+ bool b_found() { return b_found_; }
+ void IterateHeap(HeapIterator::HeapObjectsFiltering mode) {
+ HeapIterator iterator(mode);
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next()) {
+ if (obj == a_)
+ a_found_ = true;
+ else if (obj == b_)
+ b_found_ = true;
}
}
- new_capacity = new_space->Capacity();
- CHECK(old_capacity == new_capacity);
-
- // Explicitly shrinking should not affect space capacity.
- old_capacity = new_space->Capacity();
- new_space->Shrink();
- new_capacity = new_space->Capacity();
- CHECK(old_capacity == new_capacity);
+ private:
+ Object* a_;
+ Object* b_;
+ bool a_found_;
+ bool b_found_;
+};
- // Let the scavenger empty the new space.
- HEAP->CollectGarbage(NEW_SPACE);
- CHECK_LE(new_space->Size(), old_capacity);
-
- // Explicitly shrinking should halve the space capacity.
- old_capacity = new_space->Capacity();
- new_space->Shrink();
- new_capacity = new_space->Capacity();
- CHECK(old_capacity == 2 * new_capacity);
-
- // Consecutive shrinking should not affect space capacity.
- old_capacity = new_space->Capacity();
- new_space->Shrink();
- new_space->Shrink();
- new_space->Shrink();
- new_capacity = new_space->Capacity();
- CHECK(old_capacity == new_capacity);
+TEST(HeapIteratorFilterUnreachable) {
+ InitializeVM();
+ v8::HandleScope scope;
+ CompileRun("a = {}; b = {};");
+ v8::Handle<Object> a(ISOLATE->context()->global()->GetProperty(
+ *FACTORY->LookupAsciiSymbol("a"))->ToObjectChecked());
+ v8::Handle<Object> b(ISOLATE->context()->global()->GetProperty(
+ *FACTORY->LookupAsciiSymbol("b"))->ToObjectChecked());
+ CHECK_NE(*a, *b);
+ {
+ HeapIteratorTestHelper helper(*a, *b);
+ helper.IterateHeap(HeapIterator::kFilterUnreachable);
+ CHECK(helper.a_found());
+ CHECK(helper.b_found());
+ }
+ CHECK(ISOLATE->context()->global()->DeleteProperty(
+ *FACTORY->LookupAsciiSymbol("a"), JSObject::FORCE_DELETION));
+ // We ensure that GC will not happen, so our raw pointer stays valid.
+ AssertNoAllocation no_alloc;
+ Object* a_saved = *a;
+ a.Clear();
+ // Verify that "a" object still resides in the heap...
+ {
+ HeapIteratorTestHelper helper(a_saved, *b);
+ helper.IterateHeap(HeapIterator::kNoFiltering);
+ CHECK(helper.a_found());
+ CHECK(helper.b_found());
+ }
+ // ...but is now unreachable.
+ {
+ HeapIteratorTestHelper helper(a_saved, *b);
+ helper.IterateHeap(HeapIterator::kFilterUnreachable);
+ CHECK(!helper.a_found());
+ CHECK(helper.b_found());
+ }
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 6f2324dbb8..72e663c462 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -494,7 +494,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
" (function a(j) { return function b() { return j; } })(100);\n"
"})(this);");
v8::V8::PauseProfiler();
- HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+ HEAP->CollectAllGarbage(true);
LOGGER->StringEvent("test-logging-done", "");
// Iterate heap to find compiled functions, will write to log.
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index e99e1e5de0..dcb51a0bcb 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -44,21 +44,21 @@ static void InitializeVM() {
}
-TEST(MarkingDeque) {
+TEST(MarkingStack) {
int mem_size = 20 * kPointerSize;
byte* mem = NewArray<byte>(20*kPointerSize);
Address low = reinterpret_cast<Address>(mem);
Address high = low + mem_size;
- MarkingDeque s;
+ MarkingStack s;
s.Initialize(low, high);
Address address = NULL;
- while (!s.IsFull()) {
- s.PushBlack(HeapObject::FromAddress(address));
+ while (!s.is_full()) {
+ s.Push(HeapObject::FromAddress(address));
address += kPointerSize;
}
- while (!s.IsEmpty()) {
+ while (!s.is_empty()) {
Address value = s.Pop()->address();
address -= kPointerSize;
CHECK_EQ(address, value);
@@ -78,7 +78,7 @@ TEST(Promotion) {
// from new space.
FLAG_gc_global = true;
FLAG_always_compact = true;
- HEAP->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
+ HEAP->ConfigureHeap(2*256*KB, 4*MB, 4*MB);
InitializeVM();
@@ -104,7 +104,7 @@ TEST(Promotion) {
TEST(NoPromotion) {
- HEAP->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
+ HEAP->ConfigureHeap(2*256*KB, 4*MB, 4*MB);
// Test the situation that some objects in new space are promoted to
// the old space
@@ -116,12 +116,9 @@ TEST(NoPromotion) {
HEAP->CollectGarbage(OLD_POINTER_SPACE);
// Allocate a big Fixed array in the new space.
- int max_size =
- Min(HEAP->MaxObjectSizeInPagedSpace(), HEAP->MaxObjectSizeInNewSpace());
-
- int length = (max_size - FixedArray::kHeaderSize) / (2*kPointerSize);
- Object* obj = i::Isolate::Current()->heap()->AllocateFixedArray(length)->
- ToObjectChecked();
+ int size = (HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
+ kPointerSize;
+ Object* obj = HEAP->AllocateFixedArray(size)->ToObjectChecked();
Handle<FixedArray> array(FixedArray::cast(obj));
@@ -231,8 +228,6 @@ TEST(MarkCompactCollector) {
}
-// TODO(1600): compaction of map space is temporarily removed from GC.
-#if 0
static Handle<Map> CreateMap() {
return FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
}
@@ -257,11 +252,11 @@ TEST(MapCompact) {
// be able to trigger map compaction.
// To give an additional chance to fail, try to force compaction which
// should be impossible right now.
- HEAP->CollectAllGarbage(Heap::kForceCompactionMask);
+ HEAP->CollectAllGarbage(true);
// And now map pointers should be encodable again.
CHECK(HEAP->map_space()->MapPointersEncodable());
}
-#endif
+
static int gc_starts = 0;
static int gc_ends = 0;
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 8f217e6cde..f5aed96d7d 100755
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -65,7 +65,7 @@ TEST(ScanKeywords) {
i::Utf8ToUC16CharacterStream stream(keyword, length);
i::JavaScriptScanner scanner(&unicode_cache);
// The scanner should parse 'let' as Token::LET for this test.
- scanner.SetHarmonyScoping(true);
+ scanner.SetHarmonyBlockScoping(true);
scanner.Initialize(&stream);
CHECK_EQ(key_token.token, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index def829c297..76fd244e97 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -52,7 +52,7 @@ TEST(TokenEnumerator) {
CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
}
CHECK(!i::TokenEnumeratorTester::token_removed(&te)->at(2));
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
CHECK(i::TokenEnumeratorTester::token_removed(&te)->at(2));
CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index b778478833..89a911274b 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -530,7 +530,7 @@ class TestConfig {
typedef int Key;
typedef int Value;
static const int kNoKey;
- static int NoValue() { return 0; }
+ static const int kNoValue;
static inline int Compare(int a, int b) {
if (a < b)
return -1;
@@ -543,6 +543,7 @@ class TestConfig {
const int TestConfig::kNoKey = 0;
+const int TestConfig::kNoValue = 0;
static unsigned PseudoRandom(int i, int j) {
diff --git a/deps/v8/test/cctest/test-reloc-info.cc b/deps/v8/test/cctest/test-reloc-info.cc
index e638201db2..5bdc4c3e6a 100644
--- a/deps/v8/test/cctest/test-reloc-info.cc
+++ b/deps/v8/test/cctest/test-reloc-info.cc
@@ -34,7 +34,7 @@ namespace internal {
static void WriteRinfo(RelocInfoWriter* writer,
byte* pc, RelocInfo::Mode mode, intptr_t data) {
- RelocInfo rinfo(pc, mode, data, NULL);
+ RelocInfo rinfo(pc, mode, data);
writer->Write(&rinfo);
}
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index cccd2eec0c..8e85444eee 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -114,6 +114,10 @@ TEST(ExternalReferenceEncoder) {
ExternalReference(isolate->counters()->keyed_load_function_prototype());
CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
encoder.Encode(keyed_load_function_prototype.address()));
+ ExternalReference the_hole_value_location =
+ ExternalReference::the_hole_value_location(isolate);
+ CHECK_EQ(make_code(UNCLASSIFIED, 2),
+ encoder.Encode(the_hole_value_location.address()));
ExternalReference stack_limit_address =
ExternalReference::address_of_stack_limit(isolate);
CHECK_EQ(make_code(UNCLASSIFIED, 4),
@@ -123,7 +127,7 @@ TEST(ExternalReferenceEncoder) {
CHECK_EQ(make_code(UNCLASSIFIED, 5),
encoder.Encode(real_stack_limit_address.address()));
#ifdef ENABLE_DEBUGGER_SUPPORT
- CHECK_EQ(make_code(UNCLASSIFIED, 16),
+ CHECK_EQ(make_code(UNCLASSIFIED, 15),
encoder.Encode(ExternalReference::debug_break(isolate).address()));
#endif // ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(make_code(UNCLASSIFIED, 10),
@@ -153,13 +157,15 @@ TEST(ExternalReferenceDecoder) {
decoder.Decode(
make_code(STATS_COUNTER,
Counters::k_keyed_load_function_prototype)));
+ CHECK_EQ(ExternalReference::the_hole_value_location(isolate).address(),
+ decoder.Decode(make_code(UNCLASSIFIED, 2)));
CHECK_EQ(ExternalReference::address_of_stack_limit(isolate).address(),
decoder.Decode(make_code(UNCLASSIFIED, 4)));
CHECK_EQ(ExternalReference::address_of_real_stack_limit(isolate).address(),
decoder.Decode(make_code(UNCLASSIFIED, 5)));
#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(ExternalReference::debug_break(isolate).address(),
- decoder.Decode(make_code(UNCLASSIFIED, 16)));
+ decoder.Decode(make_code(UNCLASSIFIED, 15)));
#endif // ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(ExternalReference::new_space_start(isolate).address(),
decoder.Decode(make_code(UNCLASSIFIED, 10)));
@@ -359,8 +365,8 @@ TEST(PartialSerialization) {
Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
}
}
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
+ HEAP->CollectAllGarbage(true);
Object* raw_foo;
{
@@ -484,7 +490,7 @@ TEST(ContextSerialization) {
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of env.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(true);
int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
@@ -557,19 +563,16 @@ DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
TEST(LinearAllocation) {
v8::V8::Initialize();
int new_space_max = 512 * KB;
- int paged_space_max = Page::kMaxHeapObjectSize;
for (int size = 1000; size < 5 * MB; size += size >> 1) {
- size &= ~8; // Round.
int new_space_size = (size < new_space_max) ? size : new_space_max;
- int paged_space_size = (size < paged_space_max) ? size : paged_space_max;
HEAP->ReserveSpace(
new_space_size,
- paged_space_size, // Old pointer space.
- paged_space_size, // Old data space.
- HEAP->code_space()->RoundSizeDownToObjectAlignment(paged_space_size),
- HEAP->map_space()->RoundSizeDownToObjectAlignment(paged_space_size),
- HEAP->cell_space()->RoundSizeDownToObjectAlignment(paged_space_size),
+ size, // Old pointer space.
+ size, // Old data space.
+ size, // Code space.
+ size, // Map space.
+ size, // Cell space.
size); // Large object space.
LinearAllocationScope linear_allocation_scope;
const int kSmallFixedArrayLength = 4;
@@ -596,7 +599,7 @@ TEST(LinearAllocation) {
Object* pointer_last = NULL;
for (int i = 0;
- i + kSmallFixedArraySize <= paged_space_size;
+ i + kSmallFixedArraySize <= size;
i += kSmallFixedArraySize) {
Object* obj = HEAP->AllocateFixedArray(kSmallFixedArrayLength,
TENURED)->ToObjectChecked();
@@ -615,9 +618,7 @@ TEST(LinearAllocation) {
}
Object* data_last = NULL;
- for (int i = 0;
- i + kSmallStringSize <= paged_space_size;
- i += kSmallStringSize) {
+ for (int i = 0; i + kSmallStringSize <= size; i += kSmallStringSize) {
Object* obj = HEAP->AllocateRawAsciiString(kSmallStringLength,
TENURED)->ToObjectChecked();
int old_page_fullness = i % Page::kPageSize;
@@ -635,7 +636,7 @@ TEST(LinearAllocation) {
}
Object* map_last = NULL;
- for (int i = 0; i + kMapSize <= paged_space_size; i += kMapSize) {
+ for (int i = 0; i + kMapSize <= size; i += kMapSize) {
Object* obj = HEAP->AllocateMap(JS_OBJECT_TYPE,
42 * kPointerSize)->ToObjectChecked();
int old_page_fullness = i % Page::kPageSize;
diff --git a/deps/v8/test/cctest/test-spaces.cc b/deps/v8/test/cctest/test-spaces.cc
index ee60086ed2..0f22ce1a9b 100644
--- a/deps/v8/test/cctest/test-spaces.cc
+++ b/deps/v8/test/cctest/test-spaces.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -32,9 +32,7 @@
using namespace v8::internal;
-#if 0
static void VerifyRegionMarking(Address page_start) {
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
Page* p = Page::FromAddress(page_start);
p->SetRegionMarks(Page::kAllRegionsCleanMarks);
@@ -56,13 +54,9 @@ static void VerifyRegionMarking(Address page_start) {
addr += kPointerSize) {
CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
}
-#endif
}
-#endif
-// TODO(gc) you can no longer allocate pages like this. Details are hidden.
-#if 0
TEST(Page) {
byte* mem = NewArray<byte>(2*Page::kPageSize);
CHECK(mem != NULL);
@@ -95,7 +89,6 @@ TEST(Page) {
DeleteArray(mem);
}
-#endif
namespace v8 {
@@ -129,46 +122,62 @@ TEST(MemoryAllocator) {
Isolate* isolate = Isolate::Current();
isolate->InitializeLoggingAndCounters();
Heap* heap = isolate->heap();
- CHECK(isolate->heap()->ConfigureHeapDefault());
-
+ CHECK(heap->ConfigureHeapDefault());
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator->Setup(heap->MaxReserved(),
heap->MaxExecutableSize()));
+ TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
- int total_pages = 0;
OldSpace faked_space(heap,
heap->MaxReserved(),
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
- Page* first_page =
- memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
-
- first_page->InsertAfter(faked_space.anchor()->prev_page());
+ int total_pages = 0;
+ int requested = MemoryAllocator::kPagesPerChunk;
+ int allocated;
+ // If we request n pages, we should get n or n - 1.
+ Page* first_page = memory_allocator->AllocatePages(
+ requested, &allocated, &faked_space);
CHECK(first_page->is_valid());
- CHECK(first_page->next_page() == faked_space.anchor());
- total_pages++;
+ CHECK(allocated == requested || allocated == requested - 1);
+ total_pages += allocated;
- for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
- CHECK(p->owner() == &faked_space);
+ Page* last_page = first_page;
+ for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
+ CHECK(memory_allocator->IsPageInSpace(p, &faked_space));
+ last_page = p;
}
// Again, we should get n or n - 1 pages.
- Page* other =
- memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
- CHECK(other->is_valid());
- total_pages++;
- other->InsertAfter(first_page);
+ Page* others = memory_allocator->AllocatePages(
+ requested, &allocated, &faked_space);
+ CHECK(others->is_valid());
+ CHECK(allocated == requested || allocated == requested - 1);
+ total_pages += allocated;
+
+ memory_allocator->SetNextPage(last_page, others);
int page_count = 0;
- for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
- CHECK(p->owner() == &faked_space);
+ for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
+ CHECK(memory_allocator->IsPageInSpace(p, &faked_space));
page_count++;
}
CHECK(total_pages == page_count);
Page* second_page = first_page->next_page();
CHECK(second_page->is_valid());
- memory_allocator->Free(first_page);
- memory_allocator->Free(second_page);
+
+ // Freeing pages at the first chunk starting at or after the second page
+ // should free the entire second chunk. It will return the page it was passed
+ // (since the second page was in the first chunk).
+ Page* free_return = memory_allocator->FreePages(second_page);
+ CHECK(free_return == second_page);
+ memory_allocator->SetNextPage(first_page, free_return);
+
+ // Freeing pages in the first chunk starting at the first page should free
+ // the first chunk and return an invalid page.
+ Page* invalid_page = memory_allocator->FreePages(first_page);
+ CHECK(!invalid_page->is_valid());
+
memory_allocator->TearDown();
delete memory_allocator;
}
@@ -187,8 +196,12 @@ TEST(NewSpace) {
NewSpace new_space(heap);
- CHECK(new_space.Setup(HEAP->ReservedSemiSpaceSize(),
- HEAP->ReservedSemiSpaceSize()));
+ void* chunk =
+ memory_allocator->ReserveInitialChunk(4 * heap->ReservedSemiSpaceSize());
+ CHECK(chunk != NULL);
+ Address start = RoundUp(static_cast<Address>(chunk),
+ 2 * heap->ReservedSemiSpaceSize());
+ CHECK(new_space.Setup(start, 2 * heap->ReservedSemiSpaceSize()));
CHECK(new_space.HasBeenSetup());
while (new_space.Available() >= Page::kMaxHeapObjectSize) {
@@ -220,7 +233,13 @@ TEST(OldSpace) {
NOT_EXECUTABLE);
CHECK(s != NULL);
- CHECK(s->Setup());
+ void* chunk = memory_allocator->ReserveInitialChunk(
+ 4 * heap->ReservedSemiSpaceSize());
+ CHECK(chunk != NULL);
+ Address start = static_cast<Address>(chunk);
+ size_t size = RoundUp(start, 2 * heap->ReservedSemiSpaceSize()) - start;
+
+ CHECK(s->Setup(start, size));
while (s->Available() > 0) {
s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
@@ -239,12 +258,14 @@ TEST(LargeObjectSpace) {
LargeObjectSpace* lo = HEAP->lo_space();
CHECK(lo != NULL);
+ Map* faked_map = reinterpret_cast<Map*>(HeapObject::FromAddress(0));
int lo_size = Page::kPageSize;
- Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked();
+ Object* obj = lo->AllocateRaw(lo_size)->ToObjectUnchecked();
CHECK(obj->IsHeapObject());
HeapObject* ho = HeapObject::cast(obj);
+ ho->set_map(faked_map);
CHECK(lo->Contains(HeapObject::cast(obj)));
@@ -254,13 +275,14 @@ TEST(LargeObjectSpace) {
while (true) {
intptr_t available = lo->Available();
- { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
+ { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size);
if (!maybe_obj->ToObject(&obj)) break;
}
+ HeapObject::cast(obj)->set_map(faked_map);
CHECK(lo->Available() < available);
};
CHECK(!lo->IsEmpty());
- CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
+ CHECK(lo->AllocateRaw(lo_size)->IsFailure());
}
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 93f7588d36..55c21417d0 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Check that we can traverse very deep stacks of ConsStrings using
// StringInputBuffer. Check that Get(int) works on very deep stacks
@@ -502,35 +502,6 @@ TEST(SliceFromCons) {
}
-class AsciiVectorResource : public v8::String::ExternalAsciiStringResource {
- public:
- explicit AsciiVectorResource(i::Vector<const char> vector)
- : data_(vector) {}
- virtual ~AsciiVectorResource() {}
- virtual size_t length() const { return data_.length(); }
- virtual const char* data() const { return data_.start(); }
- private:
- i::Vector<const char> data_;
-};
-
-
-TEST(SliceFromExternal) {
- FLAG_string_slices = true;
- InitializeVM();
- v8::HandleScope scope;
- AsciiVectorResource resource(
- i::Vector<const char>("abcdefghijklmnopqrstuvwxyz", 26));
- Handle<String> string = FACTORY->NewExternalStringFromAscii(&resource);
- CHECK(string->IsExternalString());
- Handle<String> slice = FACTORY->NewSubString(string, 1, 25);
- CHECK(slice->IsSlicedString());
- CHECK(string->IsExternalString());
- CHECK_EQ(SlicedString::cast(*slice)->parent(), *string);
- CHECK(SlicedString::cast(*slice)->parent()->IsExternalString());
- CHECK(slice->IsFlat());
-}
-
-
TEST(TrivialSlice) {
// This tests whether a slice that contains the entire parent string
// actually creates a new string (it should not).
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index 713d1e8425..985b9e5b99 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -63,7 +63,7 @@ enum Turn {
static Turn turn = FILL_CACHE;
-class ThreadA : public v8::internal::Thread {
+class ThreadA: public v8::internal::Thread {
public:
ThreadA() : Thread("ThreadA") { }
void Run() {
@@ -99,7 +99,7 @@ class ThreadA : public v8::internal::Thread {
};
-class ThreadB : public v8::internal::Thread {
+class ThreadB: public v8::internal::Thread {
public:
ThreadB() : Thread("ThreadB") { }
void Run() {
@@ -111,7 +111,7 @@ class ThreadB : public v8::internal::Thread {
v8::Context::Scope context_scope(v8::Context::New());
// Clear the caches by forcing major GC.
- HEAP->CollectAllGarbage(v8::internal::Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(false);
turn = SECOND_TIME_FILL_CACHE;
break;
}
@@ -190,19 +190,3 @@ TEST(ThreadIdValidation) {
delete threads[i];
}
}
-
-
-class ThreadC : public v8::internal::Thread {
- public:
- ThreadC() : Thread("ThreadC") { }
- void Run() {
- Join();
- }
-};
-
-
-TEST(ThreadJoinSelf) {
- ThreadC thread;
- thread.Start();
- thread.Join();
-}
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 56d593628a..db4db25435 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -50,7 +50,7 @@ static void PutIntoWeakMap(Handle<JSWeakMap> weakmap,
Handle<JSObject> key,
int value) {
Handle<ObjectHashTable> table = PutIntoObjectHashTable(
- Handle<ObjectHashTable>(ObjectHashTable::cast(weakmap->table())),
+ Handle<ObjectHashTable>(weakmap->table()),
Handle<JSObject>(JSObject::cast(*key)),
Handle<Smi>(Smi::FromInt(value)));
weakmap->set_table(*table);
@@ -85,14 +85,13 @@ TEST(Weakness) {
v8::HandleScope scope;
PutIntoWeakMap(weakmap, Handle<JSObject>(JSObject::cast(*key)), 23);
}
- CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+ CHECK_EQ(1, weakmap->table()->NumberOfElements());
// Force a full GC.
HEAP->CollectAllGarbage(false);
CHECK_EQ(0, NumberOfWeakCalls);
- CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
- CHECK_EQ(
- 0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
+ CHECK_EQ(1, weakmap->table()->NumberOfElements());
+ CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
// Make the global reference to the key weak.
{
@@ -108,14 +107,12 @@ TEST(Weakness) {
// weak references whereas the second one will also clear weak maps.
HEAP->CollectAllGarbage(false);
CHECK_EQ(1, NumberOfWeakCalls);
- CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
- CHECK_EQ(
- 0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
+ CHECK_EQ(1, weakmap->table()->NumberOfElements());
+ CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
HEAP->CollectAllGarbage(false);
CHECK_EQ(1, NumberOfWeakCalls);
- CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
- CHECK_EQ(
- 1, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
+ CHECK_EQ(0, weakmap->table()->NumberOfElements());
+ CHECK_EQ(1, weakmap->table()->NumberOfDeletedElements());
}
@@ -125,7 +122,7 @@ TEST(Shrinking) {
Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
// Check initial capacity.
- CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->Capacity());
+ CHECK_EQ(32, weakmap->table()->Capacity());
// Fill up weak map to trigger capacity change.
{
@@ -138,17 +135,15 @@ TEST(Shrinking) {
}
// Check increased capacity.
- CHECK_EQ(128, ObjectHashTable::cast(weakmap->table())->Capacity());
+ CHECK_EQ(128, weakmap->table()->Capacity());
// Force a full GC.
- CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
- CHECK_EQ(
- 0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
+ CHECK_EQ(32, weakmap->table()->NumberOfElements());
+ CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
HEAP->CollectAllGarbage(false);
- CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
- CHECK_EQ(
- 32, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
+ CHECK_EQ(0, weakmap->table()->NumberOfElements());
+ CHECK_EQ(32, weakmap->table()->NumberOfDeletedElements());
// Check shrunk capacity.
- CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->Capacity());
+ CHECK_EQ(32, weakmap->table()->Capacity());
}
diff --git a/deps/v8/test/es5conform/es5conform.status b/deps/v8/test/es5conform/es5conform.status
index bf3ee8bb5e..d095a2471d 100644
--- a/deps/v8/test/es5conform/es5conform.status
+++ b/deps/v8/test/es5conform/es5conform.status
@@ -41,6 +41,16 @@ chapter10/10.4/10.4.2/10.4.2-2-c-1: FAIL_OK
# We are compatible with Safari and Firefox.
chapter11/11.1/11.1.5: UNIMPLEMENTED
+# We do not have a global object called 'global' as required by tests.
+chapter15/15.1: FAIL_OK
+
+# NaN is writable. We are compatible with JSC.
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-178: FAIL_OK
+# Infinity is writable. We are compatible with JSC.
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-179: FAIL_OK
+# undefined is writable. We are compatible with JSC.
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-180: FAIL_OK
+
# Our Function object has an "arguments" property which is used as a
# non-property in the test.
chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-183: FAIL_OK
@@ -96,6 +106,9 @@ chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-7: FAIL_OK
# SUBSETFAIL
chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-11: FAIL_OK
+# We do not implement all methods on RegExp.
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-13: FAIL
+
# SUBSETFAIL
chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-14: FAIL_OK
@@ -183,6 +196,27 @@ chapter15/15.4/15.4.4/15.4.4.22/15.4.4.22-9-c-ii-4-s: SKIP
# have no effect on the actual array on which reduceRight is called.
chapter15/15.4/15.4.4/15.4.4.22/15.4.4.22-9-7: FAIL_OK
+# We do not correctly recognize \uFEFF as whitespace
+chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-10: FAIL
+chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-18: FAIL
+chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-34: FAIL
+
+# RegExp.prototype is not of type RegExp - we are bug compatible with JSC.
+chapter15/15.10/15.10.6/15.10.6: FAIL_OK
+
+# We do not have the properties of a RegExp instance on RegExp.prototype.
+# The spec says we should - but we are currently bug compatible with JSC.
+chapter15/15.10/15.10.7/15.10.7.1/15.10.7.1-1: FAIL_OK
+chapter15/15.10/15.10.7/15.10.7.1/15.10.7.1-2: FAIL_OK
+chapter15/15.10/15.10.7/15.10.7.2/15.10.7.2-1: FAIL_OK
+chapter15/15.10/15.10.7/15.10.7.2/15.10.7.2-2: FAIL_OK
+chapter15/15.10/15.10.7/15.10.7.3/15.10.7.3-1: FAIL_OK
+chapter15/15.10/15.10.7/15.10.7.3/15.10.7.3-2: FAIL_OK
+chapter15/15.10/15.10.7/15.10.7.4/15.10.7.4-1: FAIL_OK
+chapter15/15.10/15.10.7/15.10.7.4/15.10.7.4-2: FAIL_OK
+chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-1: FAIL_OK
+chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-2: FAIL_OK
+
##############################################################################
# Unimplemented parts of strict mode
# Setting expectations to fail only so that the tests trigger as soon as
diff --git a/deps/v8/test/mjsunit/array-tostring.js b/deps/v8/test/mjsunit/array-tostring.js
deleted file mode 100644
index 6708657eef..0000000000
--- a/deps/v8/test/mjsunit/array-tostring.js
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Array's toString should call the object's own join method, if one exists and
-// is callable. Otherwise, just use the original Object.toString function.
-
-var success = "[test success]";
-var expectedThis;
-function testJoin() {
- assertEquals(0, arguments.length);
- assertSame(expectedThis, this);
- return success;
-}
-
-
-// On an Array object.
-
-// Default case.
-var a1 = [1, 2, 3];
-assertEquals(a1.join(), a1.toString());
-
-// Non-standard "join" function is called correctly.
-var a2 = [1, 2, 3];
-a2.join = testJoin;
-expectedThis = a2;
-assertEquals(success, a2.toString());
-
-// Non-callable join function is ignored and Object.prototype.toString is
-// used instead.
-var a3 = [1, 2, 3];
-a3.join = "not callable";
-assertEquals("[object Array]", a3.toString());
-
-// Non-existing join function is treated same as non-callable.
-var a4 = [1, 2, 3];
-a4.__proto__ = { toString: Array.prototype.toString };
-// No join on Array.
-assertEquals("[object Array]", a4.toString());
-
-
-// On a non-Array object.
-
-// Default looks-like-an-array case.
-var o1 = {length: 3, 0: 1, 1: 2, 2: 3,
- toString: Array.prototype.toString,
- join: Array.prototype.join};
-assertEquals(o1.join(), o1.toString());
-
-
-// Non-standard join is called correctly.
-// Check that we don't read, e.g., length before calling join.
-var o2 = {toString : Array.prototype.toString,
- join: testJoin,
- get length() { assertUnreachable(); },
- get 0() { assertUnreachable(); }};
-expectedThis = o2;
-assertEquals(success, o2.toString());
-
-// Non-standard join is called even if it looks like an array.
-var o3 = {length: 3, 0: 1, 1: 2, 2: 3,
- toString: Array.prototype.toString,
- join: testJoin};
-expectedThis = o3;
-assertEquals(success, o3.toString());
-
-// Non-callable join works same as for Array.
-var o4 = {length: 3, 0: 1, 1: 2, 2: 3,
- toString: Array.prototype.toString,
- join: "not callable"};
-assertEquals("[object Object]", o4.toString());
-
-
-// Non-existing join works same as for Array.
-var o5 = {length: 3, 0: 1, 1: 2, 2: 3,
- toString: Array.prototype.toString
- /* no join */};
-assertEquals("[object Object]", o5.toString());
-
-
-// Test that ToObject is called before getting "join", so the instance
-// that "join" is read from is the same one passed as receiver later.
-var called_before = false;
-expectedThis = null;
-Object.defineProperty(Number.prototype, "join", {get: function() {
- assertFalse(called_before);
- called_before = true;
- expectedThis = this;
- return testJoin;
- }});
-Number.prototype.arrayToString = Array.prototype.toString;
-assertEquals(success, (42).arrayToString());
-
-// ----------------------------------------------------------
-// Testing Array.prototype.toLocaleString
-
-// Ensure that it never uses Array.prototype.toString for anything.
-Array.prototype.toString = function() { assertUnreachable(); };
-
-// Default case.
-var la1 = [1, [2, 3], 4];
-assertEquals("1,2,3,4", la1.toLocaleString());
-
-// Used on a string (which looks like an array of characters).
-String.prototype.toLocaleString = Array.prototype.toLocaleString;
-assertEquals("1,2,3,4", "1234".toLocaleString());
-
-// If toLocaleString of element is not callable, throw a TypeError.
-var la2 = [1, {toLocaleString: "not callable"}, 3];
-assertThrows(function() { la2.toLocaleString(); }, TypeError);
-
-// If toLocaleString of element is callable, call it.
-var la3 = [1, {toLocaleString: function() { return "XX";}}, 3];
-assertEquals("1,XX,3", la3.toLocaleString());
-
-// Omitted elements, as well as undefined and null, become empty string.
-var la4 = [1, null, 3, undefined, 5,, 7];
-assertEquals("1,,3,,5,,7", la4.toLocaleString());
-
-
-// ToObject is called first and the same object is being used for the
-// rest of the operations.
-Object.defineProperty(Number.prototype, "length", {
- get: function() {
- expectedThis = this;
- return 3;
- }});
-for (var i = 0; i < 3; i++) {
- Object.defineProperty(Number.prototype, i, {
- get: function() {
- assertEquals(expectedThis, this);
- return +this;
- }});
-}
-Number.prototype.arrayToLocaleString = Array.prototype.toLocaleString;
-assertEquals("42,42,42", (42).arrayToLocaleString()); \ No newline at end of file
diff --git a/deps/v8/test/mjsunit/assert-opt-and-deopt.js b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
index 51cb99adc3..c9adb5bb15 100644
--- a/deps/v8/test/mjsunit/assert-opt-and-deopt.js
+++ b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
@@ -150,6 +150,11 @@ tracker.AssertDeoptCount(f, 0);
f(1);
+tracker.AssertOptCount(f, 0);
+tracker.AssertIsOptimized(f, false);
+tracker.AssertDeoptHappened(f, false);
+tracker.AssertDeoptCount(f, 0);
+
%OptimizeFunctionOnNextCall(f);
f(1);
@@ -167,7 +172,6 @@ tracker.AssertDeoptCount(f, 1);
// Let's trigger optimization for another type.
for (var i = 0; i < 5; i++) f("a");
-
%OptimizeFunctionOnNextCall(f);
f("b");
diff --git a/deps/v8/test/mjsunit/bugs/harmony/debug-blockscopes.js b/deps/v8/test/mjsunit/bugs/harmony/debug-blockscopes.js
index fda32eb3d3..a407c531a7 100644
--- a/deps/v8/test/mjsunit/bugs/harmony/debug-blockscopes.js
+++ b/deps/v8/test/mjsunit/bugs/harmony/debug-blockscopes.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --harmony-scoping
+// Flags: --expose-debug-as debug --harmony-block-scoping
// The functions used for testing backtraces. They are at the top to make the
// testing of source line/column easier.
diff --git a/deps/v8/test/mjsunit/compiler/inline-context-slots.js b/deps/v8/test/mjsunit/compiler/inline-context-slots.js
deleted file mode 100644
index d0e907b1e5..0000000000
--- a/deps/v8/test/mjsunit/compiler/inline-context-slots.js
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Test inlining of functions with context slots.
-
-// Flags: --allow-natives-syntax
-
-
-// Caller/callee without a local context.
-
-(function() {
- var X = 5;
- var Y = 10;
- function F() {}
- F.prototype.max = function() {
- return X > Y ? X : Y;
- }
- F.prototype.run = function() {
- return this.max();
- }
- var f = new F();
- for (var i=0; i<5; i++) f.run();
- %OptimizeFunctionOnNextCall(f.run);
- assertEquals(10, f.run());
-})();
diff --git a/deps/v8/test/mjsunit/const-redecl.js b/deps/v8/test/mjsunit/const-redecl.js
index c0b97e6ced..945970891b 100644
--- a/deps/v8/test/mjsunit/const-redecl.js
+++ b/deps/v8/test/mjsunit/const-redecl.js
@@ -98,8 +98,7 @@ function TestAll(expected,s,opt_e) {
var msg = s;
if (opt_e) { e = opt_e; msg += "; " + opt_e; }
assertEquals(expected, TestLocal(s,e), "local:'" + msg + "'");
- // Redeclarations of global consts do not throw, they are silently ignored.
- assertEquals(42, TestGlobal(s, 42), "global:'" + msg + "'");
+ assertEquals(expected, TestGlobal(s,e), "global:'" + msg + "'");
assertEquals(expected, TestContext(s,e), "context:'" + msg + "'");
}
@@ -219,62 +218,3 @@ TestAll(0, "var a,b,c,d,e,f,g,h; " + loop, "x");
// Test that const inside with behaves correctly.
TestAll(87, "with ({x:42}) { const x = 87; }", "x");
TestAll(undefined, "with ({x:42}) { const x; }", "x");
-
-
-// Additional tests for how various combinations of re-declarations affect
-// the values of the var/const in question.
-try {
- eval("var undefined;");
-} catch (ex) {
- assertUnreachable("undefined (1) has thrown");
-}
-
-var original_undef = undefined;
-var undefined = 1; // Should be silently ignored.
-assertEquals(original_undef, undefined, "undefined got overwritten");
-undefined = original_undef;
-
-var a; const a; const a = 1;
-assertEquals(1, a, "a has wrong value");
-a = 2;
-assertEquals(2, a, "a should be writable");
-
-var b = 1; const b = 2;
-assertEquals(2, b, "b has wrong value");
-
-var c = 1; const c = 2; const c = 3;
-assertEquals(3, c, "c has wrong value");
-
-const d = 1; const d = 2;
-assertEquals(1, d, "d has wrong value");
-
-const e = 1; var e = 2;
-assertEquals(1, e, "e has wrong value");
-
-const f = 1; const f;
-assertEquals(1, f, "f has wrong value");
-
-var g; const g = 1;
-assertEquals(1, g, "g has wrong value");
-g = 2;
-assertEquals(2, g, "g should be writable");
-
-const h; var h = 1;
-assertEquals(undefined,h, "h has wrong value");
-
-eval("Object.defineProperty(this, 'i', { writable: true });"
- + "const i = 7;"
- + "assertEquals(7, i, \"i has wrong value\");");
-
-var global = this;
-assertThrows(function() {
- Object.defineProperty(global, 'j', { writable: true })
-}, TypeError);
-const j = 2; // This is what makes the function above throw, because the
-// const declaration gets hoisted and makes the property non-configurable.
-assertEquals(2, j, "j has wrong value");
-
-var k = 1; const k;
-// You could argue about the expected result here. For now, the winning
-// argument is that "const k;" is equivalent to "const k = undefined;".
-assertEquals(undefined, k, "k has wrong value");
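The const-redecl.js hunks revert the test to V8 3.6.4 semantics: a conflicting re-declaration of a global const is expected to behave like the local and context cases again, and the long list of var/const combination checks added for 3.7 is dropped. The helpers the test builds on (TestLocal, TestGlobal, TestContext) are not shown in this patch, so the snippet below is only a hypothetical stand-in for that style of probe: evaluate a declaration string at global scope and report what the binding ends up as. Which values (or exceptions) come back is exactly what the expectations above pin down per V8 version; print is the d8 shell's output function, as elsewhere in mjsunit.

    // Hypothetical stand-in for the TestGlobal-style harness used above.
    // Evaluates a declaration snippet at global scope and reports the result.
    function probeGlobal(snippet) {
      try {
        (0, eval)(snippet);          // indirect eval => runs at global scope
      } catch (e) {
        return "threw " + e.name;    // e.g. TypeError or SyntaxError
      }
      return typeof x === "undefined" ? "undefined" : x;
    }

    // A const followed by a conflicting re-declaration; the value observed
    // here is the kind of outcome the edited expectations above nail down.
    print(probeGlobal("const x = 3; var x = 42;"));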
diff --git a/deps/v8/test/mjsunit/element-kind.js b/deps/v8/test/mjsunit/element-kind.js
index 46fd8f567d..48a029f27e 100644
--- a/deps/v8/test/mjsunit/element-kind.js
+++ b/deps/v8/test/mjsunit/element-kind.js
@@ -25,25 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --smi-only-arrays
-// Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile
-// time sticks if built with snapshot. If --smi-only-arrays is deactivated
-// by default, only a no-snapshot build actually has smi-only arrays enabled
-// in this test case. Depending on whether smi-only arrays are actually
-// enabled, this test takes the appropriate code path to check smi-only arrays.
-
-
-support_smi_only_arrays = %HasFastSmiOnlyElements([]);
-
-if (support_smi_only_arrays) {
- print("Tests include smi-only arrays.");
-} else {
- print("Tests do NOT include smi-only arrays.");
-}
+// Flags: --allow-natives-syntax
+// Test element kind of objects
var element_kind = {
- fast_smi_only_elements : 0,
fast_elements : 1,
fast_double_elements : 2,
dictionary_elements : 3,
@@ -59,17 +44,9 @@ var element_kind = {
}
// We expect an object to only be of one element kind.
-function assertKind(expected, obj) {
- if (support_smi_only_arrays) {
- assertEquals(expected == element_kind.fast_smi_only_elements,
- %HasFastSmiOnlyElements(obj));
- assertEquals(expected == element_kind.fast_elements,
- %HasFastElements(obj));
- } else {
- assertEquals(expected == element_kind.fast_elements ||
- expected == element_kind.fast_smi_only_elements,
- %HasFastElements(obj));
- }
+function assertKind(expected, obj){
+ assertEquals(expected == element_kind.fast_elements,
+ %HasFastElements(obj));
assertEquals(expected == element_kind.fast_double_elements,
%HasFastDoubleElements(obj));
assertEquals(expected == element_kind.dictionary_elements,
@@ -103,30 +80,16 @@ me.dance = 0xD15C0;
me.drink = 0xC0C0A;
assertKind(element_kind.fast_elements, me);
-var too = [1,2,3];
-assertKind(element_kind.fast_smi_only_elements, too);
-too.dance = 0xD15C0;
-too.drink = 0xC0C0A;
-assertKind(element_kind.fast_smi_only_elements, too);
-
-// Make sure the element kind transitions from smionly when a non-smi is stored.
var you = new Array();
-assertKind(element_kind.fast_smi_only_elements, you);
-for (var i = 0; i < 1337; i++) {
- var val = i;
- if (i == 1336) {
- assertKind(element_kind.fast_smi_only_elements, you);
- val = new Object();
- }
- you[i] = val;
+for(i = 0; i < 1337; i++) {
+ you[i] = i;
}
assertKind(element_kind.fast_elements, you);
-assertKind(element_kind.dictionary_elements, new Array(0xDECAF));
+assertKind(element_kind.dictionary_elements, new Array(0xC0C0A));
+
+// fast_double_elements not yet available
-var fast_double_array = new Array(0xDECAF);
-for (var i = 0; i < 0xDECAF; i++) fast_double_array[i] = i / 2;
-assertKind(element_kind.fast_double_elements, fast_double_array);
assertKind(element_kind.external_byte_elements, new Int8Array(9001));
assertKind(element_kind.external_unsigned_byte_elements, new Uint8Array(007));
@@ -137,125 +100,3 @@ assertKind(element_kind.external_unsigned_int_elements, new Uint32Array(23));
assertKind(element_kind.external_float_elements, new Float32Array(7));
assertKind(element_kind.external_double_elements, new Float64Array(0));
assertKind(element_kind.external_pixel_elements, new PixelArray(512));
-
-// Crankshaft support for smi-only array elements.
-function monomorphic(array) {
- for (var i = 0; i < 3; i++) {
- array[i] = i + 10;
- }
- assertKind(element_kind.fast_smi_only_elements, array);
- for (var i = 0; i < 3; i++) {
- var a = array[i];
- assertEquals(i + 10, a);
- }
-}
-var smi_only = [1, 2, 3];
-for (var i = 0; i < 3; i++) monomorphic(smi_only);
-%OptimizeFunctionOnNextCall(monomorphic);
-monomorphic(smi_only);
-function polymorphic(array, expected_kind) {
- array[1] = 42;
- assertKind(expected_kind, array);
- var a = array[1];
- assertEquals(42, a);
-}
-var smis = [1, 2, 3];
-var strings = ["one", "two", "three"];
-var doubles = [0, 0, 0]; doubles[0] = 1.5; doubles[1] = 2.5; doubles[2] = 3.5;
-assertKind(support_smi_only_arrays
- ? element_kind.fast_double_elements
- : element_kind.fast_elements,
- doubles);
-for (var i = 0; i < 3; i++) {
- polymorphic(smis, element_kind.fast_smi_only_elements);
- polymorphic(strings, element_kind.fast_elements);
- polymorphic(doubles, support_smi_only_arrays
- ? element_kind.fast_double_elements
- : element_kind.fast_elements);
-}
-%OptimizeFunctionOnNextCall(polymorphic);
-polymorphic(smis, element_kind.fast_smi_only_elements);
-polymorphic(strings, element_kind.fast_elements);
-polymorphic(doubles, support_smi_only_arrays
- ? element_kind.fast_double_elements
- : element_kind.fast_elements);
-
-// Crankshaft support for smi-only elements in dynamic array literals.
-function get(foo) { return foo; } // Used to generate dynamic values.
-
-function crankshaft_test() {
- var a = [get(1), get(2), get(3)];
- assertKind(element_kind.fast_smi_only_elements, a);
- var b = [get(1), get(2), get("three")];
- assertKind(element_kind.fast_elements, b);
- var c = [get(1), get(2), get(3.5)];
- // The full code generator doesn't support conversion to fast_double_elements
- // yet. Crankshaft does, but only with --smi-only-arrays support.
- if ((%GetOptimizationStatus(crankshaft_test) & 1) &&
- support_smi_only_arrays) {
- assertKind(element_kind.fast_double_elements, c);
- } else {
- assertKind(element_kind.fast_elements, c);
- }
-}
-for (var i = 0; i < 3; i++) {
- crankshaft_test();
-}
-%OptimizeFunctionOnNextCall(crankshaft_test);
-crankshaft_test();
-
-// Elements_kind transitions for arrays.
-
-// A map can have three different elements_kind transitions: SMI->DOUBLE,
-// DOUBLE->OBJECT, and SMI->OBJECT. No matter in which order these three are
-// created, they must always end up with the same FAST map.
-
-// This test is meaningless without FAST_SMI_ONLY_ELEMENTS.
-if (support_smi_only_arrays) {
- // Preparation: create one pair of identical objects for each case.
- var a = [1, 2, 3];
- var b = [1, 2, 3];
- assertTrue(%HaveSameMap(a, b));
- assertKind(element_kind.fast_smi_only_elements, a);
- var c = [1, 2, 3];
- c["case2"] = true;
- var d = [1, 2, 3];
- d["case2"] = true;
- assertTrue(%HaveSameMap(c, d));
- assertFalse(%HaveSameMap(a, c));
- assertKind(element_kind.fast_smi_only_elements, c);
- var e = [1, 2, 3];
- e["case3"] = true;
- var f = [1, 2, 3];
- f["case3"] = true;
- assertTrue(%HaveSameMap(e, f));
- assertFalse(%HaveSameMap(a, e));
- assertFalse(%HaveSameMap(c, e));
- assertKind(element_kind.fast_smi_only_elements, e);
- // Case 1: SMI->DOUBLE, DOUBLE->OBJECT, SMI->OBJECT.
- a[0] = 1.5;
- assertKind(element_kind.fast_double_elements, a);
- a[0] = "foo";
- assertKind(element_kind.fast_elements, a);
- b[0] = "bar";
- assertTrue(%HaveSameMap(a, b));
- // Case 2: SMI->DOUBLE, SMI->OBJECT, DOUBLE->OBJECT.
- c[0] = 1.5;
- assertKind(element_kind.fast_double_elements, c);
- assertFalse(%HaveSameMap(c, d));
- d[0] = "foo";
- assertKind(element_kind.fast_elements, d);
- assertFalse(%HaveSameMap(c, d));
- c[0] = "bar";
- assertTrue(%HaveSameMap(c, d));
- // Case 3: SMI->OBJECT, SMI->DOUBLE, DOUBLE->OBJECT.
- e[0] = "foo";
- assertKind(element_kind.fast_elements, e);
- assertFalse(%HaveSameMap(e, f));
- f[0] = 1.5;
- assertKind(element_kind.fast_double_elements, f);
- assertFalse(%HaveSameMap(e, f));
- f[0] = "bar";
- assertKind(element_kind.fast_elements, f);
- assertTrue(%HaveSameMap(e, f));
-}
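The element-kind.js changes strip out everything that depends on smi-only arrays, which the 3.6.4 code being reverted to does not support; what remains still classifies objects through the %Has*Elements runtime functions. The sketch below condenses that pattern. It runs only in the d8 shell with --allow-natives-syntax, uses only the natives visible in the hunk above, and defines a trivial local assertTrue in place of the mjsunit helper.

    // Run with: d8 --allow-natives-syntax sketch.js
    function assertTrue(cond, msg) { if (!cond) throw new Error(msg || "assertion failed"); }

    var dense = [];
    for (var i = 0; i < 1337; i++) dense[i] = i;   // densely filled array
    assertTrue(%HasFastElements(dense), "dense array should use fast elements");

    var dbl = [];
    for (var i = 0; i < 10; i++) dbl[i] = i / 2;   // non-integral values
    // With smi-only arrays (3.7) this would report fast double elements;
    // on 3.6.4, as tested above, doubles stay in the generic fast kind.
    assertTrue(%HasFastElements(dbl) || %HasFastDoubleElements(dbl),
               "doubles end up in one of the fast element kinds");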
diff --git a/deps/v8/test/mjsunit/global-const-var-conflicts.js b/deps/v8/test/mjsunit/global-const-var-conflicts.js
index 2fca96f9f8..d38d0ee813 100644
--- a/deps/v8/test/mjsunit/global-const-var-conflicts.js
+++ b/deps/v8/test/mjsunit/global-const-var-conflicts.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Check that dynamically introducing conflicting consts/vars
-// is silently ignored (and does not lead to exceptions).
+// leads to exceptions.
var caught = 0;
@@ -46,12 +46,12 @@ eval("var c");
try { eval("const c"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
assertTrue(typeof c == 'undefined');
try { eval("const c = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(1, c);
+assertTrue(typeof c == 'undefined');
eval("var d = 0");
try { eval("const d"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(undefined, d);
+assertEquals(0, d);
try { eval("const d = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(1, d);
+assertEquals(0, d);
-assertEquals(0, caught);
+assertEquals(8, caught);
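This hunk flips global-const-var-conflicts.js back to 3.6.4 behaviour, where dynamically introducing a conflicting const raises a TypeError (eight are counted in total) and the original var values survive. The underlying pattern is small enough to show in isolation; the sketch below is a plain-JavaScript version of that counting loop, with the caveat that the exact count is engine- and version-specific, which is precisely what the changed assertion encodes.

    // Count how many conflicting declarations raise a TypeError, mirroring
    // the `caught` counter in the test above. The total depends on the
    // engine version; the assertion above pins it to 8 for V8 3.6.4.
    var caught = 0;
    function tryDeclare(snippet) {
      try { (0, eval)(snippet); }
      catch (e) { if (e instanceof TypeError) caught++; }
    }

    tryDeclare("var c");
    tryDeclare("const c");       // conflicts with the var above
    tryDeclare("const c = 1");
    print("TypeErrors observed: " + caught);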
diff --git a/deps/v8/test/mjsunit/harmony/block-conflicts.js b/deps/v8/test/mjsunit/harmony/block-conflicts.js
index 8b171f1710..8d3de6f9d6 100644
--- a/deps/v8/test/mjsunit/harmony/block-conflicts.js
+++ b/deps/v8/test/mjsunit/harmony/block-conflicts.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
+// Flags: --harmony-block-scoping
// Test for conflicting variable bindings.
diff --git a/deps/v8/test/mjsunit/harmony/block-leave.js b/deps/v8/test/mjsunit/harmony/block-leave.js
index e6e3cef1db..73eaf29449 100644
--- a/deps/v8/test/mjsunit/harmony/block-leave.js
+++ b/deps/v8/test/mjsunit/harmony/block-leave.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
+// Flags: --harmony-block-scoping
// We want to test the context chain shape. In each of the test cases
// below, the outer with is to force a runtime lookup of the identifier 'x'
diff --git a/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js b/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
index 98d1464a61..c2fb96b6a4 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
+++ b/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping --allow-natives-syntax
+// Flags: --harmony-block-scoping --allow-natives-syntax
// Test that temporal dead zone semantics for function and block scoped
// let bindings are handled by the optimizing compiler.
diff --git a/deps/v8/test/mjsunit/harmony/block-let-declaration.js b/deps/v8/test/mjsunit/harmony/block-let-declaration.js
index 7f3264f257..49b63481a0 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-declaration.js
+++ b/deps/v8/test/mjsunit/harmony/block-let-declaration.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
+// Flags: --harmony-block-scoping
// Test let declarations in various settings.
@@ -47,70 +47,19 @@ if (true) {
assertEquals(undefined, y);
}
-// Invalid declarations are early errors in harmony mode and thus should trigger
-// an exception in eval code during parsing, before even compiling or executing
-// the code. Thus the generated function is not called here.
function TestLocalThrows(str, expect) {
- assertThrows("(function(){" + str + "})", expect);
+ assertThrows("(function(){" + str + "})()", expect);
}
function TestLocalDoesNotThrow(str) {
assertDoesNotThrow("(function(){" + str + "})()");
}
-// Test let declarations statement positions.
+// Unprotected statement
TestLocalThrows("if (true) let x;", SyntaxError);
-TestLocalThrows("if (true) {} else let x;", SyntaxError);
TestLocalThrows("do let x; while (false)", SyntaxError);
TestLocalThrows("while (false) let x;", SyntaxError);
-TestLocalThrows("label: let x;", SyntaxError);
-TestLocalThrows("for (;false;) let x;", SyntaxError);
-TestLocalThrows("switch (true) { case true: let x; }", SyntaxError);
-TestLocalThrows("switch (true) { default: let x; }", SyntaxError);
-// Test var declarations statement positions.
TestLocalDoesNotThrow("if (true) var x;");
-TestLocalDoesNotThrow("if (true) {} else var x;");
TestLocalDoesNotThrow("do var x; while (false)");
TestLocalDoesNotThrow("while (false) var x;");
-TestLocalDoesNotThrow("label: var x;");
-TestLocalDoesNotThrow("for (;false;) var x;");
-TestLocalDoesNotThrow("switch (true) { case true: var x; }");
-TestLocalDoesNotThrow("switch (true) { default: var x; }");
-
-// Test function declarations in source element and
-// non-strict statement positions.
-function f() {
- // Non-strict source element positions.
- function g0() {
- "use strict";
- // Strict source element positions.
- function h() { }
- {
- function h1() { }
- }
- }
- {
- function g1() { }
- }
- // Non-strict statement positions.
- if (true) function g2() { }
- if (true) {} else function g3() { }
- do function g4() { } while (false)
- while (false) function g5() { }
- label: function g6() { }
- for (;false;) function g7() { }
- switch (true) { case true: function g8() { } }
- switch (true) { default: function g9() { } }
-}
-f();
-
-// Test function declarations in statement position in strict mode.
-TestLocalThrows("function f() { 'use strict'; if (true) function g() {}", SyntaxError);
-TestLocalThrows("function f() { 'use strict'; if (true) {} else function g() {}", SyntaxError);
-TestLocalThrows("function f() { 'use strict'; do function g() {} while (false)", SyntaxError);
-TestLocalThrows("function f() { 'use strict'; while (false) function g() {}", SyntaxError);
-TestLocalThrows("function f() { 'use strict'; label: function g() {}", SyntaxError);
-TestLocalThrows("function f() { 'use strict'; for (;false;) function g() {}", SyntaxError);
-TestLocalThrows("function f() { 'use strict'; switch (true) { case true: function g() {} }", SyntaxError);
-TestLocalThrows("function f() { 'use strict'; switch (true) { default: function g() {} }", SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/block-let-semantics.js b/deps/v8/test/mjsunit/harmony/block-let-semantics.js
index 94020a4ca9..198c3b4fb9 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-semantics.js
+++ b/deps/v8/test/mjsunit/harmony/block-let-semantics.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
+// Flags: --harmony-block-scoping
// Test temporal dead zone semantics of let bound variables in
// function and block scopes.
@@ -81,20 +81,13 @@ TestAll('f()(); let x; function f() { return function() { x += 1; } }');
TestAll('f()(); let x; function f() { return function() { ++x; } }');
TestAll('f()(); let x; function f() { return function() { x++; } }');
-// Use before initialization with a dynamic lookup.
+// Use in before initialization with a dynamic lookup.
TestAll('eval("x + 1;"); let x;');
TestAll('eval("x = 1;"); let x;');
TestAll('eval("x += 1;"); let x;');
TestAll('eval("++x;"); let x;');
TestAll('eval("x++;"); let x;');
-// Use before initialization with check for eval-shadowed bindings.
-TestAll('function f() { eval("var y = 2;"); x + 1; }; f(); let x;');
-TestAll('function f() { eval("var y = 2;"); x = 1; }; f(); let x;');
-TestAll('function f() { eval("var y = 2;"); x += 1; }; f(); let x;');
-TestAll('function f() { eval("var y = 2;"); ++x; }; f(); let x;');
-TestAll('function f() { eval("var y = 2;"); x++; }; f(); let x;');
-
// Test that variables introduced by function declarations are created and
// initialized upon entering a function / block scope.
function f() {
@@ -143,19 +136,3 @@ function f2() {
}
assertEquals(5, n());
}
-
-// Test that resolution of let bound variables works with scopes that call eval.
-function outer() {
- function middle() {
- function inner() {
- return x;
- }
- eval("1 + 1");
- return x + inner();
- }
-
- let x = 1;
- return middle();
-}
-
-assertEquals(2, outer());
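These hunks trim the temporal-dead-zone coverage to what 3.6.4's --harmony-block-scoping implementation handles, dropping the eval-shadowing and eval-in-the-scope-chain cases. The property under test is unchanged: touching a let binding before its declaration has executed must throw. The illustration below mirrors the "f()(); let x; function f() ..." pattern above and also works as-is in modern engines, where this later became standard behaviour.

    // Calling f() runs before `x` is initialized, so the read hits the
    // temporal dead zone and throws a ReferenceError.
    function tdzProbe() {
      try {
        f();                       // reads x before `let x = 1` has run
        let x = 1;
        function f() { return x + 1; }
        return "no throw";
      } catch (e) {
        return e.name;             // "ReferenceError" under TDZ semantics
      }
    }
    print(tdzProbe());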
diff --git a/deps/v8/test/mjsunit/harmony/block-scoping.js b/deps/v8/test/mjsunit/harmony/block-scoping.js
index c70b3b6ea8..266e380725 100644
--- a/deps/v8/test/mjsunit/harmony/block-scoping.js
+++ b/deps/v8/test/mjsunit/harmony/block-scoping.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --harmony-scoping
+// Flags: --allow-natives-syntax --harmony-block-scoping
// Test functionality of block scopes.
// Hoisting of var declarations.
diff --git a/deps/v8/test/mjsunit/harmony/debug-blockscopes.js b/deps/v8/test/mjsunit/harmony/debug-blockscopes.js
index 020f52774b..0230e84b5e 100644
--- a/deps/v8/test/mjsunit/harmony/debug-blockscopes.js
+++ b/deps/v8/test/mjsunit/harmony/debug-blockscopes.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --harmony-scoping
+// Flags: --expose-debug-as debug --harmony-block-scoping
// The functions used for testing backtraces. They are at the top to make the
// testing of source line/column easier.
diff --git a/deps/v8/test/mjsunit/harmony/debug-evaluate-blockscopes.js b/deps/v8/test/mjsunit/harmony/debug-evaluate-blockscopes.js
index 06139d3965..549960a289 100644
--- a/deps/v8/test/mjsunit/harmony/debug-evaluate-blockscopes.js
+++ b/deps/v8/test/mjsunit/harmony/debug-evaluate-blockscopes.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --harmony-scoping
+// Flags: --expose-debug-as debug --harmony-block-scoping
// Test debug evaluation for functions without local context, but with
// nested catch contexts.
diff --git a/deps/v8/test/mjsunit/harmony/proxies-function.js b/deps/v8/test/mjsunit/harmony/proxies-function.js
deleted file mode 100644
index 541bca8cc8..0000000000
--- a/deps/v8/test/mjsunit/harmony/proxies-function.js
+++ /dev/null
@@ -1,382 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-proxies
-
-
-// Helper.
-
-function CreateFrozen(handler, callTrap, constructTrap) {
- if (handler.fix === undefined) handler.fix = function() { return {} }
- var f = Proxy.createFunction(handler, callTrap, constructTrap)
- Object.freeze(f)
- return f
-}
-
-
-// Calling (call, Function.prototype.call, Function.prototype.apply,
-// Function.prototype.bind).
-
-var global_object = this
-var receiver
-
-function TestCall(isStrict, callTrap) {
- assertEquals(42, callTrap(5, 37))
- // TODO(rossberg): unrelated bug: this does not succeed for optimized code:
- // assertEquals(isStrict ? undefined : global_object, receiver)
-
- var f = Proxy.createFunction({}, callTrap)
- receiver = 333
- assertEquals(42, f(11, 31))
- assertEquals(isStrict ? undefined : global_object, receiver)
- var o = {}
- assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
- assertEquals(o, receiver)
- assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
- assertEquals(isStrict ? null : global_object, receiver)
- assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
- assertEquals(2, receiver.valueOf())
- receiver = 333
- assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
- assertEquals(o, receiver)
- var ff = Function.prototype.bind.call(f, o, 12)
- receiver = 333
- assertEquals(42, ff(30))
- assertEquals(o, receiver)
- receiver = 333
- assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
- assertEquals(o, receiver)
-
- var f = CreateFrozen({}, callTrap)
- receiver = 333
- assertEquals(42, f(11, 31))
- // TODO(rossberg): unrelated bug: this does not succeed for optimized code.
- // assertEquals(isStrict ? undefined : global, receiver)
- receiver = 333
- assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
- assertEquals(o, receiver)
- receiver = 333
- assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
- assertEquals(o, receiver)
- receiver = 333
- assertEquals(42, ff(30))
- assertEquals(o, receiver)
- receiver = 333
- assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
- assertEquals(o, receiver)
-}
-
-TestCall(false, function(x, y) {
- receiver = this; return x + y
-})
-
-TestCall(true, function(x, y) {
- "use strict";
- receiver = this; return x + y
-})
-
-TestCall(false, Proxy.createFunction({}, function(x, y) {
- receiver = this; return x + y
-}))
-
-TestCall(true, Proxy.createFunction({}, function(x, y) {
- "use strict";
- receiver = this; return x + y
-}))
-
-TestCall(false, CreateFrozen({}, function(x, y) {
- receiver = this; return x + y
-}))
-
-
-function TestCallThrow(callTrap) {
- var f = Proxy.createFunction({}, callTrap)
- assertThrows(function(){ f(11) }, "myexn")
- assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
- assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
-
- var f = CreateFrozen({}, callTrap)
- assertThrows(function(){ f(11) }, "myexn")
- assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
- assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
-}
-
-TestCallThrow(function() { throw "myexn" })
-TestCallThrow(Proxy.createFunction({}, function() { throw "myexn" }))
-TestCallThrow(CreateFrozen({}, function() { throw "myexn" }))
-
-
-
-// Construction (new).
-
-var prototype = {}
-var receiver
-
-var handlerWithPrototype = {
- fix: function() { return {prototype: prototype} },
- get: function(r, n) { assertEquals("prototype", n); return prototype }
-}
-
-var handlerSansPrototype = {
- fix: function() { return {} },
- get: function(r, n) { assertEquals("prototype", n); return undefined }
-}
-
-function ReturnUndef(x, y) { "use strict"; receiver = this; this.sum = x + y }
-function ReturnThis(x, y) { "use strict"; receiver = this; this.sum = x + y; return this }
-function ReturnNew(x, y) { "use strict"; receiver = this; return {sum: x + y} }
-function ReturnNewWithProto(x, y) {
- "use strict";
- receiver = this;
- var result = Object.create(prototype)
- result.sum = x + y
- return result
-}
-
-function TestConstruct(proto, constructTrap) {
- TestConstruct2(proto, constructTrap, handlerWithPrototype)
- TestConstruct2(proto, constructTrap, handlerSansPrototype)
-}
-
-function TestConstruct2(proto, constructTrap, handler) {
- var f = Proxy.createFunction(handler, function() {}, constructTrap)
- var o = new f(11, 31)
- // TODO(rossberg): doesn't hold, due to unrelated bug.
- // assertEquals(undefined, receiver)
- assertEquals(42, o.sum)
- assertSame(proto, Object.getPrototypeOf(o))
-
- var f = CreateFrozen(handler, function() {}, constructTrap)
- var o = new f(11, 32)
- // TODO(rossberg): doesn't hold, due to unrelated bug.
- // assertEquals(undefined, receiver)
- assertEquals(43, o.sum)
- assertSame(proto, Object.getPrototypeOf(o))
-}
-
-TestConstruct(Object.prototype, ReturnNew)
-TestConstruct(prototype, ReturnNewWithProto)
-
-TestConstruct(Object.prototype, Proxy.createFunction({}, ReturnNew))
-TestConstruct(prototype, Proxy.createFunction({}, ReturnNewWithProto))
-
-TestConstruct(Object.prototype, CreateFrozen({}, ReturnNew))
-TestConstruct(prototype, CreateFrozen({}, ReturnNewWithProto))
-
-
-function TestConstructFromCall(proto, returnsThis, callTrap) {
- TestConstructFromCall2(proto, returnsThis, callTrap, handlerWithPrototype)
- TestConstructFromCall2(proto, returnsThis, callTrap, handlerSansPrototype)
-}
-
-function TestConstructFromCall2(proto, returnsThis, callTrap, handler) {
- var f = Proxy.createFunction(handler, callTrap)
- var o = new f(11, 31)
- if (returnsThis) assertEquals(o, receiver)
- assertEquals(42, o.sum)
- assertSame(proto, Object.getPrototypeOf(o))
-
- var f = CreateFrozen(handler, callTrap)
- var o = new f(11, 32)
- if (returnsThis) assertEquals(o, receiver)
- assertEquals(43, o.sum)
- assertSame(proto, Object.getPrototypeOf(o))
-}
-
-TestConstructFromCall(Object.prototype, true, ReturnUndef)
-TestConstructFromCall(Object.prototype, true, ReturnThis)
-TestConstructFromCall(Object.prototype, false, ReturnNew)
-TestConstructFromCall(prototype, false, ReturnNewWithProto)
-
-TestConstructFromCall(Object.prototype, true, Proxy.createFunction({}, ReturnUndef))
-TestConstructFromCall(Object.prototype, true, Proxy.createFunction({}, ReturnThis))
-TestConstructFromCall(Object.prototype, false, Proxy.createFunction({}, ReturnNew))
-TestConstructFromCall(prototype, false, Proxy.createFunction({}, ReturnNewWithProto))
-
-TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnUndef))
-TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnThis))
-TestConstructFromCall(Object.prototype, false, CreateFrozen({}, ReturnNew))
-TestConstructFromCall(prototype, false, CreateFrozen({}, ReturnNewWithProto))
-
-ReturnUndef.prototype = prototype
-ReturnThis.prototype = prototype
-ReturnNew.prototype = prototype
-ReturnNewWithProto.prototype = prototype
-
-TestConstructFromCall(prototype, true, ReturnUndef)
-TestConstructFromCall(prototype, true, ReturnThis)
-TestConstructFromCall(Object.prototype, false, ReturnNew)
-TestConstructFromCall(prototype, false, ReturnNewWithProto)
-
-TestConstructFromCall(Object.prototype, true, Proxy.createFunction({}, ReturnUndef))
-TestConstructFromCall(Object.prototype, true, Proxy.createFunction({}, ReturnThis))
-TestConstructFromCall(Object.prototype, false, Proxy.createFunction({}, ReturnNew))
-TestConstructFromCall(prototype, false, Proxy.createFunction({}, ReturnNewWithProto))
-
-TestConstructFromCall(prototype, true, Proxy.createFunction(handlerWithPrototype, ReturnUndef))
-TestConstructFromCall(prototype, true, Proxy.createFunction(handlerWithPrototype, ReturnThis))
-TestConstructFromCall(Object.prototype, false, Proxy.createFunction(handlerWithPrototype, ReturnNew))
-TestConstructFromCall(prototype, false, Proxy.createFunction(handlerWithPrototype, ReturnNewWithProto))
-
-TestConstructFromCall(prototype, true, CreateFrozen(handlerWithPrototype, ReturnUndef))
-TestConstructFromCall(prototype, true, CreateFrozen(handlerWithPrototype, ReturnThis))
-TestConstructFromCall(Object.prototype, false, CreateFrozen(handlerWithPrototype, ReturnNew))
-TestConstructFromCall(prototype, false, CreateFrozen(handlerWithPrototype, ReturnNewWithProto))
-
-
-function TestConstructThrow(trap) {
- TestConstructThrow2(Proxy.createFunction({fix: function() {return {}}}, trap))
- TestConstructThrow2(Proxy.createFunction({fix: function() {return {}}},
- function() {}, trap))
-}
-
-function TestConstructThrow2(f) {
- assertThrows(function(){ new f(11) }, "myexn")
- Object.freeze(f)
- assertThrows(function(){ new f(11) }, "myexn")
-}
-
-TestConstructThrow(function() { throw "myexn" })
-TestConstructThrow(Proxy.createFunction({}, function() { throw "myexn" }))
-TestConstructThrow(CreateFrozen({}, function() { throw "myexn" }))
-
-
-
-// Getters and setters.
-
-var value
-var receiver
-
-function TestAccessorCall(getterCallTrap, setterCallTrap) {
- var handler = {fix: function() { return {} }}
- var pgetter = Proxy.createFunction(handler, getterCallTrap)
- var psetter = Proxy.createFunction(handler, setterCallTrap)
-
- var o = {}
- var oo = Object.create(o)
- Object.defineProperty(o, "a", {get: pgetter, set: psetter})
- Object.defineProperty(o, "b", {get: pgetter})
- Object.defineProperty(o, "c", {set: psetter})
- Object.defineProperty(o, "3", {get: pgetter, set: psetter})
- Object.defineProperty(oo, "a", {value: 43})
-
- receiver = ""
- assertEquals(42, o.a)
- assertSame(o, receiver)
- receiver = ""
- assertEquals(42, o.b)
- assertSame(o, receiver)
- receiver = ""
- assertEquals(undefined, o.c)
- assertEquals("", receiver)
- receiver = ""
- assertEquals(42, o["a"])
- assertSame(o, receiver)
- receiver = ""
- assertEquals(42, o[3])
- assertSame(o, receiver)
-
- receiver = ""
- assertEquals(43, oo.a)
- assertEquals("", receiver)
- receiver = ""
- assertEquals(42, oo.b)
- assertSame(o, receiver)
- receiver = ""
- assertEquals(undefined, oo.c)
- assertEquals("", receiver)
- receiver = ""
- assertEquals(43, oo["a"])
- assertEquals("", receiver)
- receiver = ""
- assertEquals(42, oo[3])
- assertSame(o, receiver)
-
- receiver = ""
- assertEquals(50, o.a = 50)
- assertSame(o, receiver)
- assertEquals(50, value)
- receiver = ""
- assertEquals(51, o.b = 51)
- assertEquals("", receiver)
- assertEquals(50, value) // no setter
- assertThrows(function() { "use strict"; o.b = 51 }, TypeError)
- receiver = ""
- assertEquals(52, o.c = 52)
- assertSame(o, receiver)
- assertEquals(52, value)
- receiver = ""
- assertEquals(53, o["a"] = 53)
- assertSame(o, receiver)
- assertEquals(53, value)
- receiver = ""
- assertEquals(54, o[3] = 54)
- assertSame(o, receiver)
- assertEquals(54, value)
-
- value = 0
- receiver = ""
- assertEquals(60, oo.a = 60)
- assertEquals("", receiver)
- assertEquals(0, value) // oo has own 'a'
- assertEquals(61, oo.b = 61)
- assertSame("", receiver)
- assertEquals(0, value) // no setter
- assertThrows(function() { "use strict"; oo.b = 61 }, TypeError)
- receiver = ""
- assertEquals(62, oo.c = 62)
- assertSame(oo, receiver)
- assertEquals(62, value)
- receiver = ""
- assertEquals(63, oo["c"] = 63)
- assertSame(oo, receiver)
- assertEquals(63, value)
- receiver = ""
- assertEquals(64, oo[3] = 64)
- assertSame(oo, receiver)
- assertEquals(64, value)
-}
-
-TestAccessorCall(
- function() { receiver = this; return 42 },
- function(x) { receiver = this; value = x }
-)
-
-TestAccessorCall(
- function() { "use strict"; receiver = this; return 42 },
- function(x) { "use strict"; receiver = this; value = x }
-)
-
-TestAccessorCall(
- Proxy.createFunction({}, function() { receiver = this; return 42 }),
- Proxy.createFunction({}, function(x) { receiver = this; value = x })
-)
-
-TestAccessorCall(
- CreateFrozen({}, function() { receiver = this; return 42 }),
- CreateFrozen({}, function(x) { receiver = this; value = x })
-)
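The deleted proxies-function.js exercised function proxies created with Proxy.createFunction(handler, callTrap, constructTrap), part of the experimental Harmony proxies API (since superseded by the standard ES6 Proxy, so none of this runs in modern engines). Dropping the whole file matches the reduced proxy support in the 3.6.4 tree noted in the proxies.js TODOs further down. As a reference for what was removed, here is a minimal sketch of the call-trap shape that test relied on, assuming d8 with --harmony-proxies as in the file's own flag line.

    // Old Harmony proxy API, as used by the deleted test.
    // Requires d8 with --harmony-proxies; not part of standard JavaScript.
    var receiver;
    function callTrap(x, y) {
      receiver = this;     // record which `this` the trap saw
      return x + y;
    }

    var f = Proxy.createFunction({}, callTrap);        // empty handler + call trap
    print(f(11, 31));                                  // 42: plain calls hit the trap
    var o = {};
    print(Function.prototype.call.call(f, o, 20, 22)); // 42, with `this` bound to o
    print(receiver === o);                             // true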
diff --git a/deps/v8/test/mjsunit/harmony/proxies-hash.js b/deps/v8/test/mjsunit/harmony/proxies-hash.js
deleted file mode 100644
index 2bf1830134..0000000000
--- a/deps/v8/test/mjsunit/harmony/proxies-hash.js
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-proxies --harmony-weakmaps
-
-
-// Helper.
-
-function TestWithProxies(test, handler) {
- test(handler, Proxy.create)
- test(handler, function(h) {return Proxy.createFunction(h, function() {})})
-}
-
-
-// Weak maps.
-
-function TestWeakMap(fix) {
- TestWithProxies(TestWeakMap2, fix)
-}
-
-function TestWeakMap2(fix, create) {
- var handler = {fix: function() { return {} }}
- var p1 = create(handler)
- var p2 = create(handler)
- var p3 = create(handler)
- fix(p3)
-
- var m = new WeakMap
- m.set(p1, 123);
- m.set(p2, 321);
- assertSame(123, m.get(p1));
- assertSame(321, m.get(p2));
-
- fix(p1)
- fix(p2)
- assertSame(123, m.get(p1));
- assertSame(321, m.get(p2));
-}
-
-TestWeakMap(Object.seal)
-TestWeakMap(Object.freeze)
-TestWeakMap(Object.preventExtensions)
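The deleted proxies-hash.js checked that a proxy keeps working as a WeakMap key after it is "fixed" by Object.seal, Object.freeze, or Object.preventExtensions, i.e. its identity hash survives the fix. A condensed sketch of that check, again assuming the old Harmony APIs under d8 with --harmony-proxies --harmony-weakmaps as in the deleted flag line:

    // Requires d8 with --harmony-proxies --harmony-weakmaps (old Harmony APIs).
    var handler = { fix: function() { return {}; } };  // lets Object.freeze fix the proxy
    var p = Proxy.create(handler);

    var m = new WeakMap();
    m.set(p, 123);
    print(m.get(p));     // 123

    Object.freeze(p);    // triggers handler.fix, turning p into a fixed object
    print(m.get(p));     // still 123: the key's identity survives fixing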
diff --git a/deps/v8/test/mjsunit/harmony/proxies.js b/deps/v8/test/mjsunit/harmony/proxies.js
index ad8d86a5dd..3c4e5f61c5 100644
--- a/deps/v8/test/mjsunit/harmony/proxies.js
+++ b/deps/v8/test/mjsunit/harmony/proxies.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,161 +28,70 @@
// Flags: --harmony-proxies
-// TODO(rossberg): for-in not implemented on proxies.
+// TODO(rossberg): for-in for proxies not implemented.
+// TODO(rossberg): inheritance from proxies not implemented.
+// TODO(rossberg): function proxies as constructors not implemented.
// Helper.
-function TestWithProxies(test, x, y, z) {
- test(Proxy.create, x, y, z)
- test(function(h) {return Proxy.createFunction(h, function() {})}, x, y, z)
+function TestWithProxies(test, handler) {
+ test(handler, Proxy.create)
+ test(handler, function(h) {return Proxy.createFunction(h, function() {})})
}
-
-// Getting property descriptors (Object.getOwnPropertyDescriptor).
-
-var key
-
-function TestGetOwnProperty(handler) {
- TestWithProxies(TestGetOwnProperty2, handler)
-}
-
-function TestGetOwnProperty2(create, handler) {
- var p = create(handler)
- assertEquals(42, Object.getOwnPropertyDescriptor(p, "a").value)
- assertEquals("a", key)
- assertEquals(42, Object.getOwnPropertyDescriptor(p, 99).value)
- assertEquals("99", key)
-}
-
-TestGetOwnProperty({
- getOwnPropertyDescriptor: function(k) {
- key = k
- return {value: 42, configurable: true}
- }
-})
-
-TestGetOwnProperty({
- getOwnPropertyDescriptor: function(k) {
- return this.getOwnPropertyDescriptor2(k)
- },
- getOwnPropertyDescriptor2: function(k) {
- key = k
- return {value: 42, configurable: true}
- }
-})
-
-TestGetOwnProperty({
- getOwnPropertyDescriptor: function(k) {
- key = k
- return {get value() { return 42 }, get configurable() { return true }}
- }
-})
-
-TestGetOwnProperty(Proxy.create({
- get: function(pr, pk) {
- return function(k) { key = k; return {value: 42, configurable: true} }
- }
-}))
-
-
-function TestGetOwnPropertyThrow(handler) {
- TestWithProxies(TestGetOwnPropertyThrow2, handler)
-}
-
-function TestGetOwnPropertyThrow2(create, handler) {
- var p = create(handler)
- assertThrows(function(){ Object.getOwnPropertyDescriptor(p, "a") }, "myexn")
- assertThrows(function(){ Object.getOwnPropertyDescriptor(p, 77) }, "myexn")
-}
-
-TestGetOwnPropertyThrow({
- getOwnPropertyDescriptor: function(k) { throw "myexn" }
-})
-
-TestGetOwnPropertyThrow({
- getOwnPropertyDescriptor: function(k) {
- return this.getPropertyDescriptor2(k)
- },
- getOwnPropertyDescriptor2: function(k) { throw "myexn" }
-})
-
-TestGetOwnPropertyThrow({
- getOwnPropertyDescriptor: function(k) {
- return {get value() { throw "myexn" }}
- }
-})
-
-TestGetOwnPropertyThrow(Proxy.create({
- get: function(pr, pk) {
- return function(k) { throw "myexn" }
- }
-}))
-
-
-
-// Getters (dot, brackets).
-
-var key
+// Getters.
function TestGet(handler) {
TestWithProxies(TestGet2, handler)
}
-function TestGet2(create, handler) {
+function TestGet2(handler, create) {
var p = create(handler)
assertEquals(42, p.a)
- assertEquals("a", key)
assertEquals(42, p["b"])
- assertEquals("b", key)
- assertEquals(42, p[99])
- assertEquals("99", key)
- var o = Object.create(p, {x: {value: 88}})
- assertEquals(42, o.a)
- assertEquals("a", key)
- assertEquals(42, o["b"])
- assertEquals("b", key)
- assertEquals(42, o[99])
- assertEquals("99", key)
- assertEquals(88, o.x)
- assertEquals(88, o["x"])
+ // TODO(rossberg): inheritance from proxies not yet implemented.
+ // var o = Object.create(p, {x: {value: 88}})
+ // assertEquals(42, o.a)
+ // assertEquals(42, o["b"])
+ // assertEquals(88, o.x)
+ // assertEquals(88, o["x"])
}
TestGet({
- get: function(r, k) { key = k; return 42 }
+ get: function(r, k) { return 42 }
})
TestGet({
get: function(r, k) { return this.get2(r, k) },
- get2: function(r, k) { key = k; return 42 }
+ get2: function(r, k) { return 42 }
})
TestGet({
- getPropertyDescriptor: function(k) { key = k; return {value: 42} }
+ getPropertyDescriptor: function(k) { return {value: 42} }
})
TestGet({
getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) { key = k; return {value: 42} }
+ getPropertyDescriptor2: function(k) { return {value: 42} }
})
TestGet({
getPropertyDescriptor: function(k) {
- key = k;
return {get value() { return 42 }}
}
})
TestGet({
get: undefined,
- getPropertyDescriptor: function(k) { key = k; return {value: 42} }
+ getPropertyDescriptor: function(k) { return {value: 42} }
})
TestGet(Proxy.create({
get: function(pr, pk) {
- return function(r, k) { key = k; return 42 }
+ return function(r, k) { return 42 }
}
}))
@@ -191,32 +100,14 @@ function TestGetCall(handler) {
TestWithProxies(TestGetCall2, handler)
}
-function TestGetCall2(create, handler) {
+function TestGetCall2(handler, create) {
var p = create(handler)
assertEquals(55, p.f())
- assertEquals(55, p["f"]())
assertEquals(55, p.f("unused", "arguments"))
assertEquals(55, p.f.call(p))
- assertEquals(55, p["f"].call(p))
- assertEquals(55, p[101].call(p))
assertEquals(55, p.withargs(45, 5))
assertEquals(55, p.withargs.call(p, 11, 22))
assertEquals("6655", "66" + p) // calls p.toString
-
- var o = Object.create(p, {g: {value: function(x) { return x + 88 }}})
- assertEquals(55, o.f())
- assertEquals(55, o["f"]())
- assertEquals(55, o.f("unused", "arguments"))
- assertEquals(55, o.f.call(o))
- assertEquals(55, o.f.call(p))
- assertEquals(55, o["f"].call(p))
- assertEquals(55, o[101].call(p))
- assertEquals(55, o.withargs(45, 5))
- assertEquals(55, o.withargs.call(p, 11, 22))
- assertEquals(90, o.g(2))
- assertEquals(91, o.g.call(o, 3))
- assertEquals(92, o.g.call(p, 4))
- assertEquals("6655", "66" + o) // calls o.toString
}
TestGetCall({
@@ -277,19 +168,10 @@ function TestGetThrow(handler) {
TestWithProxies(TestGetThrow2, handler)
}
-function TestGetThrow2(create, handler) {
+function TestGetThrow2(handler, create) {
var p = create(handler)
assertThrows(function(){ p.a }, "myexn")
assertThrows(function(){ p["b"] }, "myexn")
- assertThrows(function(){ p[3] }, "myexn")
-
- var o = Object.create(p, {x: {value: 88}, '4': {value: 89}})
- assertThrows(function(){ o.a }, "myexn")
- assertThrows(function(){ o["b"] }, "myexn")
- assertThrows(function(){ o[3] }, "myexn")
- assertEquals(88, o.x)
- assertEquals(88, o["x"])
- assertEquals(89, o[4])
}
TestGetThrow({
@@ -338,11 +220,11 @@ TestGetThrow(Proxy.create({
var key
var val
-function TestSet(handler) {
+function TestSet(handler, create) {
TestWithProxies(TestSet2, handler)
}
-function TestSet2(create, handler) {
+function TestSet2(handler, create) {
var p = create(handler)
assertEquals(42, p.a = 42)
assertEquals("a", key)
@@ -350,9 +232,6 @@ function TestSet2(create, handler) {
assertEquals(43, p["b"] = 43)
assertEquals("b", key)
assertEquals(43, val)
- assertEquals(44, p[77] = 44)
- assertEquals("77", key)
- assertEquals(44, val)
}
TestSet({
@@ -425,15 +304,15 @@ TestSet(Proxy.create({
}))
-function TestSetThrow(handler) {
+
+function TestSetThrow(handler, create) {
TestWithProxies(TestSetThrow2, handler)
}
-function TestSetThrow2(create, handler) {
+function TestSetThrow2(handler, create) {
var p = create(handler)
assertThrows(function(){ p.a = 42 }, "myexn")
assertThrows(function(){ p["b"] = 42 }, "myexn")
- assertThrows(function(){ p[22] = 42 }, "myexn")
}
TestSetThrow({
@@ -545,124 +424,6 @@ TestSetThrow(Proxy.create({
}))
-var key
-var val
-
-function TestSetForDerived(handler) {
- TestWithProxies(TestSetForDerived2, handler)
-}
-
-function TestSetForDerived2(create, handler) {
- var p = create(handler)
- var o = Object.create(p, {x: {value: 88, writable: true},
- '1': {value: 89, writable: true}})
-
- key = ""
- assertEquals(48, o.x = 48)
- assertEquals("", key) // trap not invoked
- assertEquals(48, o.x)
-
- assertEquals(47, o[1] = 47)
- assertEquals("", key) // trap not invoked
- assertEquals(47, o[1])
-
- assertEquals(49, o.y = 49)
- assertEquals("y", key)
- assertEquals(49, o.y)
-
- assertEquals(50, o[2] = 50)
- assertEquals("2", key)
- assertEquals(50, o[2])
-
- assertEquals(44, o.p_writable = 44)
- assertEquals("p_writable", key)
- assertEquals(44, o.p_writable)
-
- assertEquals(45, o.p_nonwritable = 45)
- assertEquals("p_nonwritable", key)
- assertEquals(45, o.p_nonwritable)
-
- assertEquals(46, o.p_setter = 46)
- assertEquals("p_setter", key)
- assertEquals(46, val) // written to parent
- assertFalse(Object.prototype.hasOwnProperty.call(o, "p_setter"))
-
- val = ""
- assertEquals(47, o.p_nosetter = 47)
- assertEquals("p_nosetter", key)
- assertEquals("", val) // not written at all
- assertFalse(Object.prototype.hasOwnProperty.call(o, "p_nosetter"));
-
- key = ""
- assertThrows(function(){ "use strict"; o.p_nosetter = 50 }, TypeError)
- assertEquals("p_nosetter", key)
- assertEquals("", val) // not written at all
-
- assertThrows(function(){ o.p_nonconf = 53 }, TypeError)
- assertEquals("p_nonconf", key)
-
- assertThrows(function(){ o.p_throw = 51 }, "myexn")
- assertEquals("p_throw", key)
-
- assertThrows(function(){ o.p_setterthrow = 52 }, "myexn")
- assertEquals("p_setterthrow", key)
-}
-
-TestSetForDerived({
- getPropertyDescriptor: function(k) {
- key = k;
- switch (k) {
- case "p_writable": return {writable: true, configurable: true}
- case "p_nonwritable": return {writable: false, configurable: true}
- case "p_setter":return {set: function(x) { val = x }, configurable: true}
- case "p_nosetter": return {get: function() { return 1 }, configurable: true}
- case "p_nonconf":return {}
- case "p_throw": throw "myexn"
- case "p_setterthrow": return {set: function(x) { throw "myexn" }}
- default: return undefined
- }
- }
-})
-
-
-// Evil proxy-induced side-effects shouldn't crash.
-// TODO(rossberg): proper behaviour isn't really spec'ed yet, so ignore results.
-
-TestWithProxies(function(create) {
- var calls = 0
- var handler = {
- getPropertyDescriptor: function() {
- ++calls
- return (calls % 2 == 1)
- ? {get: function() { return 5 }, configurable: true}
- : {set: function() { return false }, configurable: true}
- }
- }
- var p = create(handler)
- var o = Object.create(p)
- // Make proxy prototype property read-only after CanPut check.
- try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestWithProxies(function(create) {
- var handler = {
- getPropertyDescriptor: function() {
- Object.defineProperty(o, "x", {get: function() { return 5 }});
- return {set: function() {}}
- }
- }
- var p = create(handler)
- var o = Object.create(p)
- // Make object property read-only after CanPut check.
- try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
-})
-
-
-
-// TODO(rossberg): TestSetReject, returning false
-// TODO(rossberg): TestGetProperty, TestSetProperty
-
-
// Property definition (Object.defineProperty and Object.defineProperties).
@@ -673,7 +434,7 @@ function TestDefine(handler) {
TestWithProxies(TestDefine2, handler)
}
-function TestDefine2(create, handler) {
+function TestDefine2(handler, create) {
var p = create(handler)
assertEquals(p, Object.defineProperty(p, "a", {value: 44}))
assertEquals("a", key)
@@ -692,12 +453,6 @@ function TestDefine2(create, handler) {
assertEquals(46, desc.value)
assertEquals(false, desc.enumerable)
- assertEquals(p, Object.defineProperty(p, 101, {value: 47, enumerable: false}))
- assertEquals("101", key)
- assertEquals(2, Object.getOwnPropertyNames(desc).length)
- assertEquals(47, desc.value)
- assertEquals(false, desc.enumerable)
-
var attributes = {configurable: true, mine: 66, minetoo: 23}
assertEquals(p, Object.defineProperty(p, "d", attributes))
assertEquals("d", key)
@@ -732,7 +487,7 @@ function TestDefine2(create, handler) {
// assertEquals(77, desc.value)
var props = {
- '11': {},
+ 'bla': {},
blub: {get: function() { return true }},
'': {get value() { return 20 }},
last: {value: 21, configurable: true, mine: "eyes"}
@@ -769,10 +524,9 @@ function TestDefineThrow(handler) {
TestWithProxies(TestDefineThrow2, handler)
}
-function TestDefineThrow2(create, handler) {
+function TestDefineThrow2(handler, create) {
var p = create(handler)
assertThrows(function(){ Object.defineProperty(p, "a", {value: 44})}, "myexn")
- assertThrows(function(){ Object.defineProperty(p, 0, {value: 44})}, "myexn")
// TODO(rossberg): These tests require for-in on proxies.
// var d1 = create({
@@ -819,14 +573,12 @@ function TestDelete(handler) {
TestWithProxies(TestDelete2, handler)
}
-function TestDelete2(create, handler) {
+function TestDelete2(handler, create) {
var p = create(handler)
assertEquals(true, delete p.a)
assertEquals("a", key)
assertEquals(true, delete p["b"])
assertEquals("b", key)
- assertEquals(true, delete p[1])
- assertEquals("1", key)
assertEquals(false, delete p.z1)
assertEquals("z1", key)
@@ -839,8 +591,6 @@ function TestDelete2(create, handler) {
assertEquals("c", key)
assertEquals(true, delete p["d"])
assertEquals("d", key)
- assertEquals(true, delete p[2])
- assertEquals("2", key)
assertThrows(function(){ delete p.z3 }, TypeError)
assertEquals("z3", key)
@@ -869,17 +619,15 @@ function TestDeleteThrow(handler) {
TestWithProxies(TestDeleteThrow2, handler)
}
-function TestDeleteThrow2(create, handler) {
+function TestDeleteThrow2(handler, create) {
var p = create(handler)
assertThrows(function(){ delete p.a }, "myexn")
assertThrows(function(){ delete p["b"] }, "myexn");
- assertThrows(function(){ delete p[3] }, "myexn");
(function() {
"use strict"
assertThrows(function(){ delete p.c }, "myexn")
assertThrows(function(){ delete p["d"] }, "myexn")
- assertThrows(function(){ delete p[4] }, "myexn");
})()
}
@@ -910,7 +658,7 @@ function TestDescriptor(handler) {
TestWithProxies(TestDescriptor2, handler)
}
-function TestDescriptor2(create, handler) {
+function TestDescriptor2(handler, create) {
var p = create(handler)
var descs = [
{configurable: true},
@@ -949,7 +697,7 @@ function TestDescriptorThrow(handler) {
TestWithProxies(TestDescriptorThrow2, handler)
}
-function TestDescriptorThrow2(create, handler) {
+function TestDescriptorThrow2(handler, create) {
var p = create(handler)
assertThrows(function(){ Object.getOwnPropertyDescriptor(p, "a") }, "myexn")
}
@@ -973,7 +721,7 @@ function TestComparison(eq) {
TestWithProxies(TestComparison2, eq)
}
-function TestComparison2(create, eq) {
+function TestComparison2(eq, create) {
var p1 = create({})
var p2 = create({})
@@ -1016,7 +764,7 @@ function TestIn(handler) {
TestWithProxies(TestIn2, handler)
}
-function TestIn2(create, handler) {
+function TestIn2(handler, create) {
var p = create(handler)
assertTrue("a" in p)
assertEquals("a", key)
@@ -1030,7 +778,6 @@ function TestIn2(create, handler) {
assertEquals(0, ("zzz" in p) ? 2 : 0)
assertEquals(2, !("zzz" in p) ? 2 : 0)
- // Test compilation in conditionals.
if ("b" in p) {
} else {
assertTrue(false)
@@ -1083,7 +830,7 @@ TestIn({
})
TestIn({
- has: undefined,
+ get: undefined,
getPropertyDescriptor: function(k) {
key = k; return k < "z" ? {value: 42} : void 0
}
@@ -1100,10 +847,9 @@ function TestInThrow(handler) {
TestWithProxies(TestInThrow2, handler)
}
-function TestInThrow2(create, handler) {
+function TestInThrow2(handler, create) {
var p = create(handler)
assertThrows(function(){ return "a" in o }, "myexn")
- assertThrows(function(){ return 99 in o }, "myexn")
assertThrows(function(){ return !("a" in o) }, "myexn")
assertThrows(function(){ return ("a" in o) ? 2 : 3 }, "myexn")
assertThrows(function(){ if ("b" in o) {} }, "myexn")
@@ -1130,7 +876,7 @@ TestInThrow({
})
TestInThrow({
- has: undefined,
+ get: undefined,
getPropertyDescriptor: function(k) { throw "myexn" }
})
@@ -1145,158 +891,6 @@ TestInThrow(Proxy.create({
}))
-function TestInForDerived(handler) {
- TestWithProxies(TestInForDerived2, handler)
-}
-
-function TestInForDerived2(create, handler) {
- var p = create(handler)
- var o = Object.create(p)
-
- assertTrue("a" in o)
- assertEquals("a", key)
- assertTrue(99 in o)
- assertEquals("99", key)
- assertFalse("z" in o)
- assertEquals("z", key)
-
- assertEquals(2, ("a" in o) ? 2 : 0)
- assertEquals(0, !("a" in o) ? 2 : 0)
- assertEquals(0, ("zzz" in o) ? 2 : 0)
- assertEquals(2, !("zzz" in o) ? 2 : 0)
-
- if ("b" in o) {
- } else {
- assertTrue(false)
- }
- assertEquals("b", key)
-
- if ("zz" in o) {
- assertTrue(false)
- }
- assertEquals("zz", key)
-
- if (!("c" in o)) {
- assertTrue(false)
- }
- assertEquals("c", key)
-
- if (!("zzz" in o)) {
- } else {
- assertTrue(false)
- }
- assertEquals("zzz", key)
-}
-
-TestInForDerived({
- getPropertyDescriptor: function(k) {
- key = k; return k < "z" ? {value: 42, configurable: true} : void 0
- }
-})
-
-TestInForDerived({
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) {
- key = k; return k < "z" ? {value: 42, configurable: true} : void 0
- }
-})
-
-TestInForDerived({
- getPropertyDescriptor: function(k) {
- key = k;
- return k < "z" ? {get value() { return 42 }, configurable: true} : void 0
- }
-})
-
-/* TODO(rossberg): this will work once we implement the newest proposal
- * regarding default traps for getPropertyDescriptor.
-TestInForDerived({
- getOwnPropertyDescriptor: function(k) {
- key = k; return k < "z" ? {value: 42, configurable: true} : void 0
- }
-})
-
-TestInForDerived({
- getOwnPropertyDescriptor: function(k) {
- return this.getOwnPropertyDescriptor2(k)
- },
- getOwnPropertyDescriptor2: function(k) {
- key = k; return k < "z" ? {value: 42, configurable: true} : void 0
- }
-})
-
-TestInForDerived({
- getOwnPropertyDescriptor: function(k) {
- key = k;
- return k < "z" ? {get value() { return 42 }, configurable: true} : void 0
- }
-})
-*/
-
-TestInForDerived(Proxy.create({
- get: function(pr, pk) {
- return function(k) {
- key = k; return k < "z" ? {value: 42, configurable: true} : void 0
- }
- }
-}))
-
-
-
-// Property descriptor conversion.
-
-var descget
-
-function TestDescriptorGetOrder(handler) {
- var p = Proxy.create(handler)
- var o = Object.create(p, {b: {value: 0}})
- TestDescriptorGetOrder2(function(n) { return p[n] }, "vV")
- TestDescriptorGetOrder2(function(n) { return n in p }, "")
- TestDescriptorGetOrder2(function(n) { return o[n] }, "vV")
- TestDescriptorGetOrder2(function(n) { return n in o }, "eEcCvVwWgs")
-}
-
-function TestDescriptorGetOrder2(f, access) {
- descget = ""
- assertTrue(f("a"))
- assertEquals(access, descget)
- descget = ""
- assertTrue(f(99))
- assertEquals(access, descget)
- descget = ""
- assertFalse(!!f("z"))
- assertEquals("", descget)
-}
-
-TestDescriptorGetOrder({
- getPropertyDescriptor: function(k) {
- if (k >= "z") return void 0
- // Return a proxy as property descriptor, so that we can log accesses.
- return Proxy.create({
- get: function(r, attr) {
- descget += attr[0].toUpperCase()
- return true
- },
- has: function(attr) {
- descget += attr[0]
- switch (attr) {
- case "writable":
- case "enumerable":
- case "configurable":
- case "value":
- return true
- case "get":
- case "set":
- return false
- default:
- assertUnreachable()
- }
- }
- })
- }
-})
-
-
// Own Properties (Object.prototype.hasOwnProperty).
@@ -1306,7 +900,7 @@ function TestHasOwn(handler) {
TestWithProxies(TestHasOwn2, handler)
}
-function TestHasOwn2(create, handler) {
+function TestHasOwn2(handler, create) {
var p = create(handler)
assertTrue(Object.prototype.hasOwnProperty.call(p, "a"))
assertEquals("a", key)
@@ -1364,7 +958,7 @@ function TestHasOwnThrow(handler) {
TestWithProxies(TestHasOwnThrow2, handler)
}
-function TestHasOwnThrow2(create, handler) {
+function TestHasOwnThrow2(handler, create) {
var p = create(handler)
assertThrows(function(){ Object.prototype.hasOwnProperty.call(p, "a")},
"myexn")
@@ -1412,46 +1006,34 @@ TestHasOwnThrow(Proxy.create({
// Instanceof (instanceof)
function TestInstanceof() {
- var o1 = {}
+ var o = {}
var p1 = Proxy.create({})
- var p2 = Proxy.create({}, o1)
+ var p2 = Proxy.create({}, o)
var p3 = Proxy.create({}, p2)
- var o2 = Object.create(p2)
var f0 = function() {}
- f0.prototype = o1
+ f0.prototype = o
var f1 = function() {}
f1.prototype = p1
var f2 = function() {}
f2.prototype = p2
- var f3 = function() {}
- f3.prototype = o2
-
- assertTrue(o1 instanceof Object)
- assertFalse(o1 instanceof f0)
- assertFalse(o1 instanceof f1)
- assertFalse(o1 instanceof f2)
- assertFalse(o1 instanceof f3)
+
+ assertTrue(o instanceof Object)
+ assertFalse(o instanceof f0)
+ assertFalse(o instanceof f1)
+ assertFalse(o instanceof f2)
assertFalse(p1 instanceof Object)
assertFalse(p1 instanceof f0)
assertFalse(p1 instanceof f1)
assertFalse(p1 instanceof f2)
- assertFalse(p1 instanceof f3)
assertTrue(p2 instanceof Object)
assertTrue(p2 instanceof f0)
assertFalse(p2 instanceof f1)
assertFalse(p2 instanceof f2)
- assertFalse(p2 instanceof f3)
assertTrue(p3 instanceof Object)
assertTrue(p3 instanceof f0)
assertFalse(p3 instanceof f1)
assertTrue(p3 instanceof f2)
- assertFalse(p3 instanceof f3)
- assertTrue(o2 instanceof Object)
- assertTrue(o2 instanceof f0)
- assertFalse(o2 instanceof f1)
- assertTrue(o2 instanceof f2)
- assertFalse(o2 instanceof f3)
var f = Proxy.createFunction({}, function() {})
assertTrue(f instanceof Function)
@@ -1464,57 +1046,43 @@ TestInstanceof()
// Prototype (Object.getPrototypeOf, Object.prototype.isPrototypeOf).
function TestPrototype() {
- var o1 = {}
+ var o = {}
var p1 = Proxy.create({})
- var p2 = Proxy.create({}, o1)
+ var p2 = Proxy.create({}, o)
var p3 = Proxy.create({}, p2)
var p4 = Proxy.create({}, 666)
- var o2 = Object.create(p3)
- assertSame(Object.getPrototypeOf(o1), Object.prototype)
+ assertSame(Object.getPrototypeOf(o), Object.prototype)
assertSame(Object.getPrototypeOf(p1), null)
- assertSame(Object.getPrototypeOf(p2), o1)
+ assertSame(Object.getPrototypeOf(p2), o)
assertSame(Object.getPrototypeOf(p3), p2)
assertSame(Object.getPrototypeOf(p4), null)
- assertSame(Object.getPrototypeOf(o2), p3)
- assertTrue(Object.prototype.isPrototypeOf(o1))
+ assertTrue(Object.prototype.isPrototypeOf(o))
assertFalse(Object.prototype.isPrototypeOf(p1))
assertTrue(Object.prototype.isPrototypeOf(p2))
assertTrue(Object.prototype.isPrototypeOf(p3))
assertFalse(Object.prototype.isPrototypeOf(p4))
- assertTrue(Object.prototype.isPrototypeOf(o2))
- assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o1))
+ assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o))
assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p1))
assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p2))
assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p3))
assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p4))
- assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o2))
- assertFalse(Object.prototype.isPrototypeOf.call(o1, o1))
- assertFalse(Object.prototype.isPrototypeOf.call(o1, p1))
- assertTrue(Object.prototype.isPrototypeOf.call(o1, p2))
- assertTrue(Object.prototype.isPrototypeOf.call(o1, p3))
- assertFalse(Object.prototype.isPrototypeOf.call(o1, p4))
- assertTrue(Object.prototype.isPrototypeOf.call(o1, o2))
+ assertFalse(Object.prototype.isPrototypeOf.call(o, o))
+ assertFalse(Object.prototype.isPrototypeOf.call(o, p1))
+ assertTrue(Object.prototype.isPrototypeOf.call(o, p2))
+ assertTrue(Object.prototype.isPrototypeOf.call(o, p3))
+ assertFalse(Object.prototype.isPrototypeOf.call(o, p4))
assertFalse(Object.prototype.isPrototypeOf.call(p1, p1))
- assertFalse(Object.prototype.isPrototypeOf.call(p1, o1))
+ assertFalse(Object.prototype.isPrototypeOf.call(p1, o))
assertFalse(Object.prototype.isPrototypeOf.call(p1, p2))
assertFalse(Object.prototype.isPrototypeOf.call(p1, p3))
assertFalse(Object.prototype.isPrototypeOf.call(p1, p4))
- assertFalse(Object.prototype.isPrototypeOf.call(p1, o2))
assertFalse(Object.prototype.isPrototypeOf.call(p2, p1))
assertFalse(Object.prototype.isPrototypeOf.call(p2, p2))
assertTrue(Object.prototype.isPrototypeOf.call(p2, p3))
assertFalse(Object.prototype.isPrototypeOf.call(p2, p4))
- assertTrue(Object.prototype.isPrototypeOf.call(p2, o2))
assertFalse(Object.prototype.isPrototypeOf.call(p3, p2))
- assertTrue(Object.prototype.isPrototypeOf.call(p3, o2))
- assertFalse(Object.prototype.isPrototypeOf.call(o2, o1))
- assertFalse(Object.prototype.isPrototypeOf.call(o2, p1))
- assertFalse(Object.prototype.isPrototypeOf.call(o2, p2))
- assertFalse(Object.prototype.isPrototypeOf.call(o2, p3))
- assertFalse(Object.prototype.isPrototypeOf.call(o2, p4))
- assertFalse(Object.prototype.isPrototypeOf.call(o2, o2))
var f = Proxy.createFunction({}, function() {})
assertSame(Object.getPrototypeOf(f), Function.prototype)
@@ -1529,12 +1097,12 @@ TestPrototype()
// Property names (Object.getOwnPropertyNames, Object.keys).
function TestPropertyNames(names, handler) {
- TestWithProxies(TestPropertyNames2, handler, names)
+ TestWithProxies(TestPropertyNames2, [names, handler])
}
-function TestPropertyNames2(create, handler, names) {
- var p = create(handler)
- assertArrayEquals(names, Object.getOwnPropertyNames(p))
+function TestPropertyNames2(names_handler, create) {
+ var p = create(names_handler[1])
+ assertArrayEquals(names_handler[0], Object.getOwnPropertyNames(p))
}
TestPropertyNames([], {
@@ -1561,7 +1129,7 @@ function TestPropertyNamesThrow(handler) {
TestWithProxies(TestPropertyNamesThrow2, handler)
}
-function TestPropertyNamesThrow2(create, handler) {
+function TestPropertyNamesThrow2(handler, create) {
var p = create(handler)
assertThrows(function(){ Object.getOwnPropertyNames(p) }, "myexn")
}
@@ -1577,12 +1145,12 @@ TestPropertyNamesThrow({
function TestKeys(names, handler) {
- TestWithProxies(TestKeys2, handler, names)
+ TestWithProxies(TestKeys2, [names, handler])
}
-function TestKeys2(create, handler, names) {
- var p = create(handler)
- assertArrayEquals(names, Object.keys(p))
+function TestKeys2(names_handler, create) {
+ var p = create(names_handler[1])
+ assertArrayEquals(names_handler[0], Object.keys(p))
}
TestKeys([], {
@@ -1639,7 +1207,7 @@ function TestKeysThrow(handler) {
TestWithProxies(TestKeysThrow2, handler)
}
-function TestKeysThrow2(create, handler) {
+function TestKeysThrow2(handler, create) {
var p = create(handler)
assertThrows(function(){ Object.keys(p) }, "myexn")
}
@@ -1699,6 +1267,7 @@ TestKeysThrow([], {
// Fixing (Object.freeze, Object.seal, Object.preventExtensions,
// Object.isFrozen, Object.isSealed, Object.isExtensible)
+// TODO(rossberg): use TestWithProxies to include function proxies
function TestFix(names, handler) {
var proto = {p: 77}
var assertFixing = function(o, s, f, e) {
@@ -1745,27 +1314,19 @@ function TestFix(names, handler) {
Object.keys(p3).sort())
assertEquals(proto, Object.getPrototypeOf(p3))
assertEquals(77, p3.p)
-
- var p = Proxy.create(handler, proto)
- var o = Object.create(p)
- assertFixing(p, false, false, true)
- assertFixing(o, false, false, true)
- Object.freeze(o)
- assertFixing(p, false, false, true)
- assertFixing(o, true, true, false)
}
TestFix([], {
fix: function() { return {} }
})
-TestFix(["a", "b", "c", "3", "zz"], {
+TestFix(["a", "b", "c", "d", "zz"], {
fix: function() {
return {
a: {value: "a", writable: true, configurable: false, enumerable: true},
b: {value: 33, writable: false, configurable: false, enumerable: true},
c: {value: 0, writable: true, configurable: true, enumerable: true},
- '3': {value: true, writable: false, configurable: true, enumerable: true},
+ d: {value: true, writable: false, configurable: true, enumerable: true},
zz: {value: 0, enumerable: false}
}
}
@@ -1816,8 +1377,8 @@ function TestFixThrow(handler) {
TestWithProxies(TestFixThrow2, handler)
}
-function TestFixThrow2(create, handler) {
- var p = create(handler, {})
+function TestFixThrow2(handler) {
+ var p = Proxy.create(handler, {})
assertThrows(function(){ Object.seal(p) }, "myexn")
assertThrows(function(){ Object.freeze(p) }, "myexn")
assertThrows(function(){ Object.preventExtensions(p) }, "myexn")
@@ -1843,135 +1404,6 @@ TestFixThrow({
})
-// Freeze a proxy in the middle of operations on it.
-// TODO(rossberg): actual behaviour not specified consistently at the moment,
-// just make sure that we do not crash.
-function TestReentrantFix(f) {
- TestWithProxies(f, Object.freeze)
- TestWithProxies(f, Object.seal)
- TestWithProxies(f, Object.preventExtensions)
-}
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- get get() { freeze(p); return undefined },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while getting get trap.
- try { p.x } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- get: function() { freeze(p); return 3 },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while executing get trap.
- try { p.x } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- getPropertyDescriptor: function() { freeze(p); return undefined },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while executing default get trap.
- try { p.x } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- getPropertyDescriptor: function() { freeze(p); return {get: function(){}} },
- fix: function() { return {} }
- }
- var p = create(handler)
- var o = Object.create(p)
- // Freeze while getting a property from prototype.
- try { o.x } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- get set() { freeze(p); return undefined },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while getting set trap.
- try { p.x = 4 } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- set: function() { freeze(p); return true },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while executing set trap.
- try { p.x = 4 } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- getOwnPropertyDescriptor: function() { freeze(p); return undefined },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while executing default set trap.
- try { p.x } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- getPropertyDescriptor: function() { freeze(p); return {set: function(){}} },
- fix: function() { return {} }
- }
- var p = create(handler)
- var o = Object.create(p)
- // Freeze while setting a property in prototype, dropping the property!
- try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- getPropertyDescriptor: function() { freeze(p); return {set: function(){}} },
- fix: function() { return {x: {get: function(){}}} }
- }
- var p = create(handler)
- var o = Object.create(p)
- // Freeze while setting a property in prototype, making it read-only!
- try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- get fix() { freeze(p); return function(){} }
- }
- var p = create(handler)
- // Freeze while getting fix trap.
- try { Object.freeze(p) } catch (e) { assertInstanceof(e, Error) }
- p = create(handler)
- try { Object.seal(p) } catch (e) { assertInstanceof(e, Error) }
- p = create(handler)
- try { Object.preventExtensions(p) } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- fix: function() { freeze(p); return {} }
- }
- var p = create(handler)
- // Freeze while executing fix trap.
- try { Object.freeze(p) } catch (e) { assertInstanceof(e, Error) }
- p = create(handler)
- try { Object.seal(p) } catch (e) { assertInstanceof(e, Error) }
- p = create(handler)
- try { Object.preventExtensions(p) } catch (e) { assertInstanceof(e, Error) }
-})
-
-
// String conversion (Object.prototype.toString,
// Object.prototype.toLocaleString,
@@ -1994,13 +1426,6 @@ function TestToString(handler) {
assertEquals("my_proxy", Object.prototype.toLocaleString.call(f))
assertEquals("toString", key)
assertDoesNotThrow(function(){ Function.prototype.toString.call(f) })
-
- var o = Object.create(p)
- key = ""
- assertEquals("[object Object]", Object.prototype.toString.call(o))
- assertEquals("", key)
- assertEquals("my_proxy", Object.prototype.toLocaleString.call(o))
- assertEquals("toString", key)
}
TestToString({
@@ -2027,10 +1452,6 @@ function TestToStringThrow(handler) {
var f = Proxy.createFunction(handler, function() {})
assertEquals("[object Function]", Object.prototype.toString.call(f))
assertThrows(function(){ Object.prototype.toLocaleString.call(f) }, "myexn")
-
- var o = Object.create(p)
- assertEquals("[object Object]", Object.prototype.toString.call(o))
- assertThrows(function(){ Object.prototype.toLocaleString.call(o) }, "myexn")
}
TestToStringThrow({
@@ -2064,7 +1485,7 @@ function TestValueOf(handler) {
TestWithProxies(TestValueOf2, handler)
}
-function TestValueOf2(create, handler) {
+function TestValueOf2(handler, create) {
var p = create(handler)
assertSame(p, Object.prototype.valueOf.call(p))
}
@@ -2081,7 +1502,7 @@ function TestIsEnumerable(handler) {
TestWithProxies(TestIsEnumerable2, handler)
}
-function TestIsEnumerable2(create, handler) {
+function TestIsEnumerable2(handler, create) {
var p = create(handler)
assertTrue(Object.prototype.propertyIsEnumerable.call(p, "a"))
assertEquals("a", key)
@@ -2089,11 +1510,6 @@ function TestIsEnumerable2(create, handler) {
assertEquals("2", key)
assertFalse(Object.prototype.propertyIsEnumerable.call(p, "z"))
assertEquals("z", key)
-
- var o = Object.create(p)
- key = ""
- assertFalse(Object.prototype.propertyIsEnumerable.call(o, "a"))
- assertEquals("", key) // trap not invoked
}
TestIsEnumerable({
@@ -2130,7 +1546,7 @@ function TestIsEnumerableThrow(handler) {
TestWithProxies(TestIsEnumerableThrow2, handler)
}
-function TestIsEnumerableThrow2(create, handler) {
+function TestIsEnumerableThrow2(handler, create) {
var p = create(handler)
assertThrows(function(){ Object.prototype.propertyIsEnumerable.call(p, "a") },
"myexn")
@@ -2164,3 +1580,103 @@ TestIsEnumerableThrow(Proxy.create({
return function(k) { throw "myexn" }
}
}))
+
+
+
+// Calling (call, Function.prototype.call, Function.prototype.apply,
+// Function.prototype.bind).
+
+var global = this
+var receiver
+
+function TestCall(isStrict, callTrap) {
+ assertEquals(42, callTrap(5, 37))
+// TODO(rossberg): unrelated bug: this does not succeed for optimized code.
+// assertEquals(isStrict ? undefined : global, receiver)
+
+ var f = Proxy.createFunction({fix: function() { return {} }}, callTrap)
+ receiver = 333
+ assertEquals(42, f(11, 31))
+ assertEquals(isStrict ? undefined : global, receiver)
+ var o = {}
+ assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+ assertEquals(o, receiver)
+ assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
+ assertEquals(isStrict ? null : global, receiver)
+ assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
+ assertEquals(2, receiver.valueOf())
+ receiver = 333
+ assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+ assertEquals(o, receiver)
+ var ff = Function.prototype.bind.call(f, o, 12)
+ receiver = 333
+ assertEquals(42, ff(30))
+ assertEquals(o, receiver)
+ receiver = 333
+ assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
+ assertEquals(o, receiver)
+
+ Object.freeze(f)
+ receiver = 333
+ assertEquals(42, f(11, 31))
+// TODO(rossberg): unrelated bug: this does not succeed for optimized code.
+// assertEquals(isStrict ? undefined : global, receiver)
+ receiver = 333
+ assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+ assertEquals(o, receiver)
+ receiver = 333
+ assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+ assertEquals(o, receiver)
+ receiver = 333
+ assertEquals(42, ff(30))
+ assertEquals(o, receiver)
+ receiver = 333
+ assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
+ assertEquals(o, receiver)
+}
+
+TestCall(false, function(x, y) {
+ receiver = this; return x + y
+})
+
+TestCall(true, function(x, y) {
+ "use strict";
+ receiver = this; return x + y
+})
+
+TestCall(false, Proxy.createFunction({}, function(x, y) {
+ receiver = this; return x + y
+}))
+
+TestCall(true, Proxy.createFunction({}, function(x, y) {
+ "use strict";
+ receiver = this; return x + y
+}))
+
+var p = Proxy.createFunction({fix: function() {return {}}}, function(x, y) {
+ receiver = this; return x + y
+})
+TestCall(false, p)
+Object.freeze(p)
+TestCall(false, p)
+
+
+function TestCallThrow(callTrap) {
+ var f = Proxy.createFunction({fix: function() {return {}}}, callTrap)
+ assertThrows(function(){ f(11) }, "myexn")
+ assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
+ assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
+
+ Object.freeze(f)
+ assertThrows(function(){ f(11) }, "myexn")
+ assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
+ assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
+}
+
+TestCallThrow(function() { throw "myexn" })
+TestCallThrow(Proxy.createFunction({}, function() { throw "myexn" }))
+
+var p = Proxy.createFunction(
+ {fix: function() {return {}}}, function() { throw "myexn" })
+Object.freeze(p)
+TestCallThrow(p)
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 941e0e8cc5..027da584b4 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -35,11 +35,6 @@ bugs: FAIL
regress/regress-1119: FAIL
##############################################################################
-
-# NewGC: BUG(1719) slow to collect arrays over several contexts.
-regress/regress-524: SKIP
-
-##############################################################################
# Too slow in debug mode with --stress-opt
compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug
compiler/regress-funcaller: PASS, SKIP if $mode == debug
@@ -65,6 +60,7 @@ regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
debug-liveedit-check-stack: SKIP
debug-liveedit-patch-positions-replace: SKIP
+
##############################################################################
[ $arch == arm ]
diff --git a/deps/v8/test/mjsunit/compiler/regress-96989.js b/deps/v8/test/mjsunit/regress/regress-100409.js
index aedeb24318..c29250f28d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-96989.js
+++ b/deps/v8/test/mjsunit/regress/regress-100409.js
@@ -25,19 +25,31 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
// Flags: --allow-natives-syntax
-// Test correct handling of uninitialized const.
+function outer () {
+ var val = 0;
-function test() {
- for (var i = 41; i < 42; i++) {
- var c = t ^ i;
+ function foo () {
+ val = 0;
+ val;
+ var z = false;
+ var y = true;
+ if (!z) {
+ while (z = !z) {
+ if (y) val++;
+ }
+ }
+ return val++;
}
- const t;
- return c;
+
+ return foo;
}
-for (var i=0; i<10; i++) test();
-%OptimizeFunctionOnNextCall(test);
-assertEquals(41, test());
+
+var foo = outer();
+
+assertEquals(1, foo());
+assertEquals(1, foo());
+ %OptimizeFunctionOnNextCall(foo);
+assertEquals(1, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-1170.js b/deps/v8/test/mjsunit/regress/regress-1170.js
index 66ed9f29e2..95684c5418 100644
--- a/deps/v8/test/mjsunit/regress/regress-1170.js
+++ b/deps/v8/test/mjsunit/regress/regress-1170.js
@@ -49,7 +49,7 @@ try {
exception = true;
assertTrue(/TypeError/.test(e));
}
-assertFalse(exception);
+assertTrue(exception);
exception = false;
try {
diff --git a/deps/v8/test/mjsunit/regress/regress-1213575.js b/deps/v8/test/mjsunit/regress/regress-1213575.js
index f3a11dbaab..9d82064e47 100644
--- a/deps/v8/test/mjsunit/regress/regress-1213575.js
+++ b/deps/v8/test/mjsunit/regress/regress-1213575.js
@@ -25,16 +25,17 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Make sure that a const definition does not try
-// to pass 'the hole' to a defined setter.
+// Make sure that a const definition always
+// conflicts with a defined setter. This avoids
+// trying to pass 'the hole' to the setter.
-this.__defineSetter__('x', function(value) { assertTrue(value === 1); });
+this.__defineSetter__('x', function(value) { assertTrue(false); });
var caught = false;
try {
- eval('const x = 1');
+ eval('const x');
} catch(e) {
assertTrue(e instanceof TypeError);
caught = true;
}
-assertFalse(caught);
+assertTrue(caught);
diff --git a/deps/v8/test/mjsunit/regress/regress-1217.js b/deps/v8/test/mjsunit/regress/regress-1217.js
deleted file mode 100644
index 6530549864..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1217.js
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Check that RegExp.prototype is itself a RegExp object.
-
-var proto = RegExp.prototype;
-assertEquals("[object RegExp]", Object.prototype.toString.call(proto));
-
-assertEquals("", proto.source);
-assertEquals(false, proto.global);
-assertEquals(false, proto.multiline);
-assertEquals(false, proto.ignoreCase);
-assertEquals(0, proto.lastIndex);
-
-assertEquals("/(?:)/", proto.toString());
-
-var execResult = proto.exec("argle");
-assertEquals(1, execResult.length);
-assertEquals("", execResult[0]);
-assertEquals("argle", execResult.input);
-assertEquals(0, execResult.index);
-
-assertTrue(proto.test("argle"));
-
-// We disallow re-compiling the RegExp.prototype object.
-assertThrows(function(){ proto.compile("something"); }, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-1415.js b/deps/v8/test/mjsunit/regress/regress-1415.js
deleted file mode 100644
index f993e9b3d8..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1415.js
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Surrogate pair range.
-// U+D800
-assertThrows(function(){ decodeURIComponent("%ED%A0%80"); }, URIError);
-// U+DBFF
-assertThrows(function(){ decodeURIComponent("%ED%AF%BF"); }, URIError);
-// U+DC00
-assertThrows(function(){ decodeURIComponent("%ED%B0%80"); }, URIError);
-// U+DFFF
-assertThrows(function(){ decodeURIComponent("%ED%BF%BF"); }, URIError);
-
-// Overlong encodings
-// U+007F in two bytes.
-assertThrows(function(){ decodeURIComponent("%C1%BF"); }, URIError);
-// U+07FF in three bytes.
-assertThrows(function(){ decodeURIComponent("%E0%9F%BF"); }, URIError);
diff --git a/deps/v8/test/mjsunit/regress/regress-1521.js b/deps/v8/test/mjsunit/regress/regress-1521.js
index 415db67803..3149f05a5e 100644
--- a/deps/v8/test/mjsunit/regress/regress-1521.js
+++ b/deps/v8/test/mjsunit/regress/regress-1521.js
@@ -24,6 +24,8 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --allow-natives-syntax
// Optimized variable access inside through a catch context should work.
function test(x) {
@@ -44,4 +46,3 @@ function test(x) {
}
test(3);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-1639-2.js b/deps/v8/test/mjsunit/regress/regress-1639-2.js
deleted file mode 100644
index c439dd8fff..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1639-2.js
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-function sendCommand(state, cmd) {
- // Get the debug command processor in paused state.
- var dcp = state.debugCommandProcessor(false);
- var request = JSON.stringify(cmd);
- var response = dcp.processDebugJSONRequest(request);
-}
-
-var state = 0;
-
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- var line = event_data.sourceLineText();
- print('break: ' + line);
- print('event data: ' + event_data.toJSONProtocol());
- print();
- assertEquals('// BREAK', line.substr(-8),
- "should not break outside evaluate");
-
- switch (state) {
- case 0:
- state = 1;
- // While in the debugger and stepping through a set of instructions
- // executed in the evaluate command, the stepping must stop at the end
- // of the said set of instructions and not step further into native
- // debugger code.
- sendCommand(exec_state, {
- seq : 0,
- type : "request",
- command : "evaluate",
- arguments : {
- 'expression' : 'print("A"); debugger; print("B"); // BREAK',
- 'global' : true
- }
- });
- break;
- case 1:
- sendCommand(exec_state, {
- seq : 0,
- type : "request",
- command : "continue",
- arguments : {
- stepaction : "next"
- }
- });
- break;
- }
- }
- } catch (e) {
- print(e);
- }
-}
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-function a() {
-} // BREAK
-
-// Set a break point and call to invoke the debug event listener.
-Debug.setBreakPoint(a, 0, 0);
-a();
diff --git a/deps/v8/test/mjsunit/regress/regress-1692.js b/deps/v8/test/mjsunit/regress/regress-1692.js
deleted file mode 100644
index 06bd66cf7f..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1692.js
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Test that Object.prototype.propertyIsEnumerable handles array indices
-// correctly.
-
-var p = Object.create({}, {
- a : { value : 42, enumerable : true },
- b : { value : 42, enumerable : false },
- 1 : { value : 42, enumerable : true },
- 2 : { value : 42, enumerable : false },
- f : { get: function(){}, enumerable: true },
- g : { get: function(){}, enumerable: false },
- 11 : { get: function(){}, enumerable: true },
- 12 : { get: function(){}, enumerable: false }
-});
-var o = Object.create(p, {
- c : { value : 42, enumerable : true },
- d : { value : 42, enumerable : false },
- 3 : { value : 42, enumerable : true },
- 4 : { value : 42, enumerable : false },
- h : { get: function(){}, enumerable: true },
- k : { get: function(){}, enumerable: false },
- 13 : { get: function(){}, enumerable: true },
- 14 : { get: function(){}, enumerable: false }
-});
-
-// Inherited properties are ignored.
-assertFalse(o.propertyIsEnumerable("a"));
-assertFalse(o.propertyIsEnumerable("b"));
-assertFalse(o.propertyIsEnumerable("1"));
-assertFalse(o.propertyIsEnumerable("2"));
-
-// Own properties.
-assertTrue(o.propertyIsEnumerable("c"));
-assertFalse(o.propertyIsEnumerable("d"));
-assertTrue(o.propertyIsEnumerable("3"));
-assertFalse(o.propertyIsEnumerable("4"));
-
-// Inherited accessors.
-assertFalse(o.propertyIsEnumerable("f"));
-assertFalse(o.propertyIsEnumerable("g"));
-assertFalse(o.propertyIsEnumerable("11"));
-assertFalse(o.propertyIsEnumerable("12"));
-
-// Own accessors.
-assertTrue(o.propertyIsEnumerable("h"));
-assertFalse(o.propertyIsEnumerable("k"));
-assertTrue(o.propertyIsEnumerable("13"));
-assertFalse(o.propertyIsEnumerable("14"));
-
-// Nonexisting properties.
-assertFalse(o.propertyIsEnumerable("xxx"));
-assertFalse(o.propertyIsEnumerable("999"));
-
-// String object properties.
-var o = Object("string");
-// Non-string property on String object.
-o[10] = 42;
-assertTrue(o.propertyIsEnumerable(10));
-assertFalse(o.propertyIsEnumerable(0));
-
-// Fast elements.
-var o = [1,2,3,4,5];
-assertTrue(o.propertyIsEnumerable(3));
diff --git a/deps/v8/test/mjsunit/regress/regress-1708.js b/deps/v8/test/mjsunit/regress/regress-1708.js
deleted file mode 100644
index ab50e07864..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1708.js
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Regression test of a very rare corner case where left-trimming an
-// array caused invalid marking bit patterns on lazily swept pages.
-
-// Flags: --expose-gc --noincremental-marking --max-new-space-size 1000
-
-(function() {
- var head = new Array(1);
- var tail = head;
-
- // Fill heap to increase old-space size and trigger lazy sweeping on
- // some of the old-space pages.
- for (var i = 0; i < 200; i++) {
- tail[1] = new Array(1000);
- tail = tail[1];
- }
- array = new Array(100);
- gc(); gc();
-
- // At this point "array" should have been promoted to old-space and be
- // located in a lazy swept page with intact marking bits. Now shift
- // the array to trigger left-trimming operations.
- assertEquals(100, array.length);
- for (var i = 0; i < 50; i++) {
- array.shift();
- }
- assertEquals(50, array.length);
-
- // At this point "array" should have been trimmed from the left with
- // marking bits being correctly transfered to the new object start.
- // Scavenging operations cause lazy sweeping to advance and verify
- // that marking bit patterns are still sane.
- for (var i = 0; i < 200; i++) {
- tail[1] = new Array(1000);
- tail = tail[1];
- }
-})();
diff --git a/deps/v8/test/mjsunit/regress/regress-1711.js b/deps/v8/test/mjsunit/regress/regress-1711.js
deleted file mode 100644
index 15591b1e01..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1711.js
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// string.split needs to evaluate the separator's toString even if limit
-// is 0 because toString may have side effects.
-
-var side_effect = false;
-var separator = new Object();
-separator.toString = function() {
- side_effect = true;
- return undefined;
-}
-'subject'.split(separator, 0);
-assertTrue(side_effect);
diff --git a/deps/v8/test/mjsunit/regress/regress-1713.js b/deps/v8/test/mjsunit/regress/regress-1713.js
deleted file mode 100644
index 0af1144a15..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1713.js
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax --always-compact --expose-gc
-
-var O = { get f() { return 0; } };
-
-var CODE = [];
-
-var R = [];
-
-function Allocate4Kb(N) {
- var arr = [];
- do {arr.push(new Array(1024));} while (--N > 0);
- return arr;
-}
-
-function AllocateXMb(X) {
- return Allocate4Kb((1024 * X) / 4);
-}
-
-function Node(v, next) { this.v = v; this.next = next; }
-
-Node.prototype.execute = function (O) {
- var n = this;
- while (n.next !== null) n = n.next;
- n.v(O);
-};
-
-function LongList(N, x) {
- if (N == 0) return new Node(x, null);
- return new Node(new Array(1024), LongList(N - 1, x));
-}
-
-var L = LongList(1024, function (O) {
- for (var i = 0; i < 5; i++) O.f;
-});
-
-
-
-function Incremental(O, x) {
- if (!x) {
- return;
- }
- function CreateCode(i) {
- var f = new Function("return O.f_" + i);
- CODE.push(f);
- f(); // compile
- f(); // compile
- f(); // compile
- }
-
- for (var i = 0; i < 1e4; i++) CreateCode(i);
- gc();
- gc();
- gc();
-
- print(">>> 1 <<<");
-
- L.execute(O);
-
- try {} catch (e) {}
-
- L = null;
- print(">>> 2 <<<");
- AllocateXMb(8);
- //rint("1");
- //llocateXMb(8);
- //rint("1");
- //llocateXMb(8);
-
-}
-
-function foo(O, x) {
- Incremental(O, x);
-
- print('f');
-
- for (var i = 0; i < 5; i++) O.f;
-
-
- print('g');
-
- bar(x);
-}
-
-function bar(x) {
- if (!x) return;
- %DeoptimizeFunction(foo);
- AllocateXMb(8);
- AllocateXMb(8);
-}
-
-var O1 = {};
-var O2 = {};
-var O3 = {};
-var O4 = {f:0};
-
-foo(O1, false);
-foo(O2, false);
-foo(O3, false);
-%OptimizeFunctionOnNextCall(foo);
-foo(O4, true);
diff --git a/deps/v8/test/mjsunit/regress/regress-1748.js b/deps/v8/test/mjsunit/regress/regress-1748.js
deleted file mode 100644
index e287e55496..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1748.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Test that /^/ only matches at beginning of string.
-// Bug in x64 caused it to match when executing the RegExp on a part
-// of a string that starts at a multiplum of 256.
-
-var str = Array(10000).join("X");
-str.replace(/^|X/g, function(m, i, s) {
- if (i > 0) assertEquals("X", m, "at position 0x" + i.toString(16));
-}); \ No newline at end of file
diff --git a/deps/v8/test/mjsunit/regress/regress-1757.js b/deps/v8/test/mjsunit/regress/regress-1757.js
deleted file mode 100644
index f7a5516cac..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1757.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --string-slices --expose-externalize-string
-
-var a = "abcdefghijklmnopqrstuvqxy"+"z";
-externalizeString(a, true);
-assertEquals('b', a.substring(1).charAt(0)); \ No newline at end of file
diff --git a/deps/v8/test/mjsunit/regress/regress-877615.js b/deps/v8/test/mjsunit/regress/regress-877615.js
index bec5a4d1b8..d35aba62d3 100644
--- a/deps/v8/test/mjsunit/regress/regress-877615.js
+++ b/deps/v8/test/mjsunit/regress/regress-877615.js
@@ -25,13 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-Number.prototype.toLocaleString = function() { return 'invalid'; };
-assertEquals('invalid', [1].toLocaleString()); // invalid
+Number.prototype.toLocaleString = function() { return 'invalid'};
+assertEquals([1].toLocaleString(), 'invalid'); // invalid
Number.prototype.toLocaleString = 'invalid';
-assertThrows(function() { [1].toLocaleString(); }); // Not callable.
+assertEquals([1].toLocaleString(), '1'); // 1
-delete Number.prototype.toLocaleString;
Number.prototype.toString = function() { return 'invalid' };
-assertEquals([1].toLocaleString(), 'invalid'); // Uses ToObject on elements.
-assertEquals([1].toString(), '1'); // Uses ToString directly on elements.
+assertEquals([1].toLocaleString(), '1'); // 1
+assertEquals([1].toString(), '1'); // 1
+
diff --git a/deps/v8/test/mjsunit/regress/regress-94873.js b/deps/v8/test/mjsunit/regress/regress-94873.js
deleted file mode 100644
index 41ca9921c6..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-94873.js
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug;
-
-function sendCommand(state, cmd) {
- // Get the debug command processor in paused state.
- var dcp = state.debugCommandProcessor(false);
- var request = JSON.stringify(cmd);
- var response = dcp.processDebugJSONRequest(request);
- return JSON.parse(response);
-}
-
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- var line = event_data.sourceLineText();
- print('break: ' + line);
-
- var frame = sendCommand(exec_state, {
- seq: 0,
- type: "request",
- command: "frame"
- });
-
- sendCommand(exec_state, {
- seq: 0,
- type: "request",
- command: "evaluate",
- arguments: {
- expression: "obj.x.toString()",
- additional_context: [{
- name: "obj",
- handle: frame.body.receiver.ref
- }]
- }
- });
- }
- } catch (e) {
- print(e);
- }
-}
-
-Debug.setListener(listener);
-
-function a(x, y) {
- this.x = x;
- this.y = y;
-}
-
-Debug.setBreakPoint(a, 0, 0);
-new a(1, 2); \ No newline at end of file
diff --git a/deps/v8/test/mjsunit/regress/regress-98773.js b/deps/v8/test/mjsunit/regress/regress-98773.js
deleted file mode 100644
index eb24eb5d1e..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-98773.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Calling Array.sort on an external array is not supposed to crash.
-
-var array = new Int16Array(23);
-array[7] = 7; array[9] = 9;
-assertEquals(23, array.length);
-assertEquals(7, array[7]);
-assertEquals(9, array[9]);
-
-Array.prototype.sort.call(array);
-assertEquals(23, array.length);
-assertEquals(7, array[21]);
-assertEquals(9, array[22]);
diff --git a/deps/v8/test/mjsunit/regress/regress-99167.js b/deps/v8/test/mjsunit/regress/regress-99167.js
deleted file mode 100644
index 5053ae5d24..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-99167.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-gc --max-new-space-size=1024
-
-eval("function Node() { this.a = 1; this.a = 3; }");
-new Node;
-for (var i = 0; i < 4; ++i) gc();
-for (var i = 0; i < 100000; ++i) new Node;
diff --git a/deps/v8/test/mjsunit/regress/regress-deopt-gc.js b/deps/v8/test/mjsunit/regress/regress-deopt-gc.js
index a74e2c5ea4..7b7c29a31e 100644
--- a/deps/v8/test/mjsunit/regress/regress-deopt-gc.js
+++ b/deps/v8/test/mjsunit/regress/regress-deopt-gc.js
@@ -42,7 +42,7 @@ function deopt() {
// Make sure we don't inline this function
try { var a = 42; } catch(o) {};
%DeoptimizeFunction(opt_me);
- gc();
+ gc(true);
}
diff --git a/deps/v8/test/mjsunit/regress/short-circuit.js b/deps/v8/test/mjsunit/regress/short-circuit.js
deleted file mode 100644
index 25363d6b31..0000000000
--- a/deps/v8/test/mjsunit/regress/short-circuit.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var arr = [];
-
-for (var i = 0; i < 28000; i++) {
- arr.push(new RegExp("prefix" + i.toString() + i.toString() + i.toString()));
-}
diff --git a/deps/v8/test/mjsunit/string-slices-regexp.js b/deps/v8/test/mjsunit/string-slices-regexp.js
index df01574d83..a8cadaedd5 100644
--- a/deps/v8/test/mjsunit/string-slices-regexp.js
+++ b/deps/v8/test/mjsunit/string-slices-regexp.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/deps/v8/test/mjsunit/string-slices.js b/deps/v8/test/mjsunit/string-slices.js
index 7c40229c75..8cc1f81e77 100755
--- a/deps/v8/test/mjsunit/string-slices.js
+++ b/deps/v8/test/mjsunit/string-slices.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -189,16 +189,11 @@ assertEquals("\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9abcdefghijklmnop",
assertEquals("\u03B2\u03B3\u03B4\u03B5\u03B4\u03B5\u03B6\u03B7",
utf.substring(5,1) + utf.substring(3,7));
+/*
// Externalizing strings.
-var a = "123456789" + "qwertyuiopasdfghjklzxcvbnm";
-var b = "23456789qwertyuiopasdfghjklzxcvbn"
+var a = "123456789qwertyuiopasdfghjklzxcvbnm";
+var b = a.slice(1,-1);
assertEquals(a.slice(1,-1), b);
-
-assertTrue(isAsciiString(a));
-externalizeString(a, true);
-assertFalse(isAsciiString(a));
-
+externalizeString(a);
assertEquals(a.slice(1,-1), b);
-assertTrue(/3456789qwe/.test(a));
-assertEquals(5, a.indexOf("678"));
-assertEquals("12345", a.split("6")[0]);
+*/
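The rewritten block above also swaps the hand-spelled expected string for one computed with slice(); the two are equivalent, which is easy to confirm in plain JavaScript without any of the V8 test natives (externalizeString, isAsciiString) referenced in the surrounding hunk:

var a = "123456789qwertyuiopasdfghjklzxcvbnm";
var b = a.slice(1, -1);   // drop the first and last character
// The literal the pre-downgrade test compared against:
console.log(b === "23456789qwertyuiopasdfghjklzxcvbn");   // true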
diff --git a/deps/v8/test/mjsunit/undeletable-functions.js b/deps/v8/test/mjsunit/undeletable-functions.js
index bbb798f351..04fd06068d 100644
--- a/deps/v8/test/mjsunit/undeletable-functions.js
+++ b/deps/v8/test/mjsunit/undeletable-functions.js
@@ -76,8 +76,6 @@ array = [
"execScript"];
CheckEcmaSemantics(this, array, "Global");
CheckReadOnlyAttr(this, "Infinity");
-CheckReadOnlyAttr(this, "NaN");
-CheckReadOnlyAttr(this, "undefined");
array = ["exec", "test", "toString", "compile"];
CheckEcmaSemantics(RegExp.prototype, array, "RegExp prototype");
@@ -191,7 +189,7 @@ function CheckReadOnlyAttr(type, prop) {
assertFalse(deleted, "delete operator returned true: " + prop);
assertTrue(type.hasOwnProperty(prop), "not there after delete: " + prop);
type[prop] = "foo";
- assertEquals(old, type[prop], "overwritable: " + prop);
+ assertEquals("foo", type[prop], "overwritable: " + prop);
}
print("OK");
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 6a5c08640c..3a27130990 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -69,6 +69,7 @@ js1_5/Array/regress-465980-02: SKIP
ecma_3/Date/15.9.3.2-1: SKIP
js1_2/function/Number: SKIP
+
##################### SLOW TESTS #####################
# This takes a long time to run (~100 seconds). It should only be run
@@ -226,7 +227,7 @@ ecma/String/15.5.4.12-4: FAIL_OK
ecma/String/15.5.4.12-5: FAIL_OK
# Creates a linked list of arrays until we run out of memory or timeout.
-js1_5/Regress/regress-312588: SKIP
+js1_5/Regress/regress-312588: FAIL || TIMEOUT
# Runs out of memory because it compiles huge functions.
@@ -618,10 +619,6 @@ js1_5/Expressions/regress-394673: FAIL
# We do not correctly handle assignments within "with"
/ecma_3/Statements/12.10-01: FAIL
-# We do not throw an exception when a const is redeclared.
-# (We only fail section 1 of the test.)
-js1_5/Regress/regress-103602: FAIL
-
##################### MOZILLA EXTENSION TESTS #####################
ecma/extensions/15.1.2.1-1: FAIL_OK
diff --git a/deps/v8/test/preparser/strict-identifiers.pyt b/deps/v8/test/preparser/strict-identifiers.pyt
index aa3d5210d8..72808e25bf 100644
--- a/deps/v8/test/preparser/strict-identifiers.pyt
+++ b/deps/v8/test/preparser/strict-identifiers.pyt
@@ -138,38 +138,6 @@ setter_arg = StrictTemplate("setter-param-$id", """
var x = {set foo($id) { }};
""")
-label_normal = Template("label-normal-$id", """
- $id: '';
-""")
-
-label_strict = StrictTemplate("label-strict-$id", """
- $id: '';
-""")
-
-break_normal = Template("break-normal-$id", """
- for (;;) {
- break $id;
- }
-""")
-
-break_strict = StrictTemplate("break-strict-$id", """
- for (;;) {
- break $id;
- }
-""")
-
-continue_normal = Template("continue-normal-$id", """
- for (;;) {
- continue $id;
- }
-""")
-
-continue_strict = StrictTemplate("continue-strict-$id", """
- for (;;) {
- continue $id;
- }
-""")
-
non_strict_use = Template("nonstrict-$id", """
var $id = 42;
$id++;
@@ -194,7 +162,6 @@ non_strict_use = Template("nonstrict-$id", """
function $id($id) { }
x = {$id: 42};
x = {get $id() {}, set $id(value) {}};
- $id: '';
""")
identifier_name_source = """
@@ -230,12 +197,6 @@ for id in ["eval", "arguments"]:
prefix_var({"id": id, "op":"--", "opname":"dec"}, "strict_lhs_prefix")
postfix_var({"id": id, "op":"++", "opname":"inc"}, "strict_lhs_postfix")
postfix_var({"id": id, "op":"--", "opname":"dec"}, "strict_lhs_postfix")
- label_normal({"id": id}, None)
- label_strict({"id": id}, None)
- break_normal({"id": id}, None)
- break_strict({"id": id}, None)
- continue_normal({"id": id}, None)
- continue_strict({"id": id}, None)
non_strict_use({"id": id}, None)
@@ -244,13 +205,10 @@ for id in ["eval", "arguments"]:
for reserved_word in reserved_words + strict_reserved_words:
if (reserved_word in strict_reserved_words):
message = "strict_reserved_word"
- label_message = None
elif (reserved_word == "const"):
message = "unexpected_token"
- label_message = message
else:
message = "reserved_word"
- label_message = message
arg_name_own({"id":reserved_word}, message)
arg_name_nested({"id":reserved_word}, message)
setter_arg({"id": reserved_word}, message)
@@ -267,19 +225,6 @@ for reserved_word in reserved_words + strict_reserved_words:
read_var({"id": reserved_word}, message)
identifier_name({"id": reserved_word}, None);
identifier_name_strict({"id": reserved_word}, None);
- label_normal({"id": reserved_word}, label_message)
- break_normal({"id": reserved_word}, label_message)
- continue_normal({"id": reserved_word}, label_message)
- if (reserved_word == "const"):
- # The error message for this case is different because
- # ParseLabelledStatementOrExpression will try to parse this as an expression
- # first, effectively disallowing the use in ParseVariableDeclarations, i.e.
- # the preparser never sees that 'const' was intended to be a label.
- label_strict({"id": reserved_word}, "strict_const")
- else:
- label_strict({"id": reserved_word}, message)
- break_strict({"id": reserved_word}, message)
- continue_strict({"id": reserved_word}, message)
# Future reserved words in strict mode behave like normal identifiers
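The deleted templates above generated label, break and continue variants for every identifier and reserved word. What they exercised can be shown in plain JavaScript; a sketch, independent of the preparser harness (the commented lines are the ones that would be syntax errors):

interface: '';           // a strict-reserved word may label a statement in sloppy code
(function () {
  "use strict";
  // interface: '';      // SyntaxError here: "interface" is reserved in strict mode
})();
// if: '';               // SyntaxError in any mode: "if" is always a reserved word
// eval: '';  is accepted even in strict mode; only binding or assigning eval is restricted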
diff --git a/deps/v8/test/sputnik/sputnik.status b/deps/v8/test/sputnik/sputnik.status
index 99db598af4..868509d7c5 100644
--- a/deps/v8/test/sputnik/sputnik.status
+++ b/deps/v8/test/sputnik/sputnik.status
@@ -52,9 +52,6 @@ S15.10.6.2_A1_T16: FAIL_OK
S15.10.6.2_A12: FAIL_OK
S15.10.6.3_A1_T16: FAIL_OK
-# Sputnik tests (r97) assume RegExp.prototype is an Object, not a RegExp.
-S15.10.6_A2: FAIL_OK
-
# We are silent in some regexp cases where the spec wants us to give
# errors, for compatibility.
S15.10.2.11_A1_T2: FAIL
@@ -179,19 +176,6 @@ S15.5.4.13_A1_T3: FAIL_OK
S15.5.4.14_A1_T3: FAIL_OK
S15.5.4.15_A1_T3: FAIL_OK
-# NaN, Infinity and undefined are read-only according to ES5.
-S15.1.1.1_A2_T1: FAIL_OK # NaN
-S15.1.1.1_A2_T2: FAIL_OK # NaN
-S15.1.1.2_A2_T1: FAIL_OK # Infinity
-# S15.1.1.2_A2_T2 would fail if it weren't bogus in r97. sputnik bug #45.
-S15.1.1.3_A2_T1: FAIL_OK # undefined
-S15.1.1.3_A2_T2: FAIL_OK # undefined
-
-# Array.prototype.to[Locale]String is generic in ES5.
-S15.4.4.2_A2_T1: FAIL_OK
-S15.4.4.3_A2_T1: FAIL_OK
-
-
##################### SKIPPED TESTS #####################
# These tests take a looong time to run in debug mode.
diff --git a/deps/v8/test/test262/README b/deps/v8/test/test262/README
index 094356fcf0..ea6b4a71a6 100644
--- a/deps/v8/test/test262/README
+++ b/deps/v8/test/test262/README
@@ -4,11 +4,11 @@ tests from
http://hg.ecmascript.org/tests/test262
-at revision 271 as 'data' in this directory. Using later version
+at revision 128 as 'data' in this directory. Using later version
may be possible but the tests are only known to pass (and indeed run)
with that revision.
-hg clone -r 271 http://hg.ecmascript.org/tests/test262 data
+hg clone -r 128 http://hg.ecmascript.org/tests/test262 data
If you do update to a newer revision you may have to change the test
harness adapter code since it uses internal functionality from the
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 1a619547d7..8cee210763 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -25,323 +25,98 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-prefix test262
-def FAIL_OK = FAIL, OKAY
-############################### BUGS ###################################
+#
+# ietestcenter tests.
+#
-# A bound function should fail on access to 'caller' and 'arguments'.
-S15.3.4.5_A1: FAIL
-S15.3.4.5_A2: FAIL
+prefix ietestcenter
-# '__proto__' should be treated as a normal property in JSON.
-S15.12.2_A1: FAIL
-# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1196
-S8.7_A5_T2: FAIL
+#
+# Deliberate differences for compatibility with other browsers
+#
+# 15.9.5.43-0-9 and 15.9.5.43-0-10. V8 doesn't throw RangeError
+# from Date.prototype.toISOString when string is not a finite number.
+# This is compatible with Firefox and Safari.
+15.9.5.43-0-9: PASS || FAIL
+15.9.5.43-0-10: PASS || FAIL
-# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1624
-S10.4.2.1_A1: FAIL
+#
+# Unanalyzed failures which may be bugs or deliberate differences
+#
-# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1752
-S11.8.2_A2.3_T1: FAIL
-S11.8.3_A2.3_T1: FAIL
+# Bug? Strict Mode - TypeError is thrown when changing the value of a Value
+# Property of the Global Object under strict mode (NaN)
+10.2.1.1.3-4-16-s: FAIL
+# Bug? Strict Mode - TypeError is thrown when changing the value of a Value
+# Property of the Global Object under strict mode (undefined)
+10.2.1.1.3-4-18-s: FAIL
+# Invalid test: https://bugs.ecmascript.org/show_bug.cgi?id=76
+10.4.2-2-c-1: FAIL
+# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
+# when using Greater-than operator: valueOf > valueOf
11.8.2-1: FAIL
+# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
+# when using Greater-than operator: valueOf > toString
11.8.2-2: FAIL
+# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
+# when using Greater-than operator: toString > valueOf
11.8.2-3: FAIL
+# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
+# when using Greater-than operator: toString > toString
11.8.2-4: FAIL
+# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
+# enforced when using Less-than-or-equal operator: valueOf <= valueOf
11.8.3-1: FAIL
+# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
+# enforced when using Less-than-or-equal operator: valueOf <= toString
11.8.3-2: FAIL
+# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
+# enforced when using Less-than-or-equal operator: toString <= valueOf
11.8.3-3: FAIL
+# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
+# enforced when using Less-than-or-equal operator: toString <= toString
11.8.3-4: FAIL
+# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
+# enforced when using Less-than-or-equal operator: valueOf <= valueOf
11.8.3-5: FAIL
-
-# V8 Bug.
-S13.2.3_A1: FAIL
-
-# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1530
-S15.3.3.1_A4: FAIL
-
-# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1756
-15.2.3.6-4-116: FAIL
-15.2.3.6-4-117: FAIL
-15.2.3.6-4-126: FAIL
-15.2.3.6-4-127: FAIL
-15.2.3.6-4-128: FAIL
-15.2.3.6-4-129: FAIL
-15.2.3.6-4-130: FAIL
-15.2.3.6-4-131: FAIL
-15.2.3.6-4-132: FAIL
-15.2.3.6-4-137: FAIL
-15.2.3.6-4-142: FAIL
-15.2.3.6-4-143: FAIL
-15.2.3.6-4-144: FAIL
-15.2.3.6-4-146: FAIL
-15.2.3.6-4-147: FAIL
-15.2.3.6-4-148: FAIL
-15.2.3.6-4-149: FAIL
-15.2.3.6-4-151: FAIL
-15.2.3.6-4-154: FAIL
-15.2.3.6-4-155: FAIL
-15.2.3.6-4-159: FAIL
-15.2.3.6-4-161: FAIL
-15.2.3.6-4-165: FAIL
-15.2.3.6-4-166: FAIL
-15.2.3.6-4-167: FAIL
-15.2.3.6-4-168: FAIL
-15.2.3.6-4-169: FAIL
-15.2.3.6-4-170: FAIL
-15.2.3.6-4-171: FAIL
-15.2.3.6-4-172: FAIL
-15.2.3.6-4-173: FAIL
-15.2.3.6-4-174: FAIL
-15.2.3.6-4-175: FAIL
-15.2.3.6-4-176: FAIL
-15.2.3.6-4-177: FAIL
-15.2.3.6-4-178: FAIL
-15.2.3.6-4-179-1: FAIL
-15.2.3.6-4-181: FAIL
-15.2.3.7-6-a-112: FAIL
-15.2.3.7-6-a-113: FAIL
-15.2.3.7-6-a-122: FAIL
-15.2.3.7-6-a-123: FAIL
-15.2.3.7-6-a-124: FAIL
-15.2.3.7-6-a-125: FAIL
-15.2.3.7-6-a-126: FAIL
-15.2.3.7-6-a-127: FAIL
-15.2.3.7-6-a-128: FAIL
-15.2.3.7-6-a-133: FAIL
-15.2.3.7-6-a-138: FAIL
-15.2.3.7-6-a-139: FAIL
-15.2.3.7-6-a-140: FAIL
-15.2.3.7-6-a-142: FAIL
-15.2.3.7-6-a-143: FAIL
-15.2.3.7-6-a-144: FAIL
-15.2.3.7-6-a-145: FAIL
-15.2.3.7-6-a-147: FAIL
-15.2.3.7-6-a-150: FAIL
-15.2.3.7-6-a-151: FAIL
-15.2.3.7-6-a-155: FAIL
-15.2.3.7-6-a-157: FAIL
-15.2.3.7-6-a-161: FAIL
-15.2.3.7-6-a-162: FAIL
-15.2.3.7-6-a-163: FAIL
-15.2.3.7-6-a-164: FAIL
-15.2.3.7-6-a-165: FAIL
-15.2.3.7-6-a-166: FAIL
-15.2.3.7-6-a-167: FAIL
-15.2.3.7-6-a-168: FAIL
-15.2.3.7-6-a-169: FAIL
-15.2.3.7-6-a-170: FAIL
-15.2.3.7-6-a-171: FAIL
-15.2.3.7-6-a-172: FAIL
-15.2.3.7-6-a-173: FAIL
-15.2.3.7-6-a-174: FAIL
-15.2.3.7-6-a-175: FAIL
-15.2.3.7-6-a-176: FAIL
-15.2.3.7-6-a-177: FAIL
-
-# Invalid test cases (recent change adding var changes semantics)
-S8.3_A1_T1: FAIL
-S15.3_A3_T1: FAIL
-S15.3_A3_T3: FAIL
-
-##################### DELIBERATE INCOMPATIBILITIES #####################
-
-# 15.9.5.43-0-9 and 15.9.5.43-0-10. V8 doesn't throw RangeError
-# from Date.prototype.toISOString when string is not a finite number.
-# This is compatible with Firefox and Safari.
-15.9.5.43-0-9: PASS || FAIL
-15.9.5.43-0-10: PASS || FAIL
-
-# We deliberately treat arguments to parseInt() with a leading zero as
-# octal numbers in order to not break the web.
-S15.1.2.2_A5.1_T1: FAIL_OK
-
-# This tests precision of trignometric functions. We're slightly off
-# from the implementation in libc (~ 1e-17) but it's not clear if we
-# or they are closer to the right answer, or if it even matters.
-S15.8.2.16_A7: PASS || FAIL_OK
-S15.8.2.18_A7: PASS || FAIL_OK
-S15.8.2.13_A23: PASS || FAIL_OK
-
-# We allow calls to regexp exec() with no arguments to fail for
-# compatibility reasons.
-S15.10.6.2_A1_T16: FAIL_OK
-S15.10.6.2_A12: FAIL_OK
-S15.10.6.3_A1_T16: FAIL_OK
-
-# Sputnik tests (r97) assume RegExp.prototype is an Object, not a RegExp.
-S15.10.6_A2: FAIL_OK
-
-# We are silent in some regexp cases where the spec wants us to give
-# errors, for compatibility.
-S15.10.2.11_A1_T2: FAIL
-S15.10.2.11_A1_T3: FAIL
-
-# We are more lenient in which string character escapes we allow than
-# the spec (7.8.4 p. 19) wants us to be. This is for compatibility.
-S7.8.4_A4.3_T3: FAIL_OK
-S7.8.4_A4.3_T4: FAIL_OK
-S7.8.4_A4.3_T5: FAIL_OK
-S7.8.4_A4.3_T6: FAIL_OK
-S7.8.4_A6.1_T4: FAIL_OK
-S7.8.4_A6.2_T1: FAIL_OK
-S7.8.4_A6.2_T2: FAIL_OK
-S7.8.4_A6.4_T1: FAIL_OK
-S7.8.4_A6.4_T2: FAIL_OK
-S7.8.4_A7.1_T4: FAIL_OK
-S7.8.4_A7.2_T1: FAIL_OK
-S7.8.4_A7.2_T2: FAIL_OK
-S7.8.4_A7.2_T3: FAIL_OK
-S7.8.4_A7.2_T4: FAIL_OK
-S7.8.4_A7.2_T5: FAIL_OK
-S7.8.4_A7.2_T6: FAIL_OK
-S7.8.4_A7.4_T1: FAIL_OK
-S7.8.4_A7.4_T2: FAIL_OK
-
-# Sputnik expects unicode escape sequences in RegExp flags to be interpreted.
-# The specification requires them to be passed uninterpreted to the RegExp
-# constructor. We now implement that.
-S7.8.5_A3.1_T7: FAIL_OK
-S7.8.5_A3.1_T8: FAIL_OK
-S7.8.5_A3.1_T9: FAIL_OK
-
-# We allow some keywords to be used as identifiers.
-S7.5.3_A1.15: FAIL_OK
-S7.5.3_A1.18: FAIL_OK
-S7.5.3_A1.21: FAIL_OK
-S7.5.3_A1.22: FAIL_OK
-S7.5.3_A1.23: FAIL_OK
-S7.5.3_A1.24: FAIL_OK
-S7.5.3_A1.26: FAIL_OK
-
-# This checks for non-262 behavior
-S7.6_D1: PASS || FAIL_OK
-S7.6_D2: PASS || FAIL_OK
-S8.4_D1.1: PASS || FAIL_OK
-S8.4_D2.1: PASS || FAIL_OK
-S8.4_D2.2: PASS || FAIL_OK
-S8.4_D2.3: PASS || FAIL_OK
-S8.4_D2.4: PASS || FAIL_OK
-S8.4_D2.5: PASS || FAIL_OK
-S8.4_D2.6: PASS || FAIL_OK
-S8.4_D2.7: PASS || FAIL_OK
-S11.4.3_D1.2: PASS || FAIL_OK
-S12.6.4_A14_T1: PASS || FAIL_OK
-S12.6.4_D1: PASS || FAIL_OK
-S12.6.4_R1: PASS || FAIL_OK
-S12.6.4_R2: PASS || FAIL_OK
-S13.2_D1.2: PASS || FAIL_OK
-S13_D1_T1: PASS || FAIL_OK
-S14_D4_T3: PASS || FAIL_OK
-S14_D7: PASS || FAIL_OK
-S15.1.2.2_D1.2: PASS || FAIL_OK
-S15.5.2_D2: PASS || FAIL_OK
-S15.5.4.11_D1.1_T1: PASS || FAIL_OK
-S15.5.4.11_D1.1_T2: PASS || FAIL_OK
-S15.5.4.11_D1.1_T3: PASS || FAIL_OK
-S15.5.4.11_D1.1_T4: PASS || FAIL_OK
-
-# We allow function declarations within statements
-S12.6.2_A13_T1: FAIL_OK
-S12.6.2_A13_T2: FAIL_OK
-S12.6.4_A13_T1: FAIL_OK
-S12.6.4_A13_T2: FAIL_OK
-S15.3.4.2_A1_T1: FAIL_OK
-
-# Linux and Mac defaults to extended 80 bit floating point format in the FPU.
-# We follow the other major JS engines by keeping this default.
-S8.5_A2.2: PASS, FAIL if $system == linux, FAIL if $system == macos
-S8.5_A2.1: PASS, FAIL if $system == linux, FAIL if $system == macos
-
-# These tests fail because we had to add bugs to be compatible with JSC. See
-# http://code.google.com/p/chromium/issues/detail?id=1717
-S15.5.4.1_A1_T2: FAIL_OK
-S15.5.4_A1: FAIL_OK
-S15.5.4_A3: FAIL_OK
-S15.9.5.10_A1_T2: FAIL_OK
-S15.9.5.11_A1_T2: FAIL_OK
-S15.9.5.12_A1_T2: FAIL_OK
-S15.9.5.13_A1_T2: FAIL_OK
-S15.9.5.14_A1_T2: FAIL_OK
-S15.9.5.15_A1_T2: FAIL_OK
-S15.9.5.16_A1_T2: FAIL_OK
-S15.9.5.17_A1_T2: FAIL_OK
-S15.9.5.18_A1_T2: FAIL_OK
-S15.9.5.19_A1_T2: FAIL_OK
-S15.9.5.20_A1_T2: FAIL_OK
-S15.9.5.21_A1_T2: FAIL_OK
-S15.9.5.22_A1_T2: FAIL_OK
-S15.9.5.23_A1_T2: FAIL_OK
-S15.9.5.24_A1_T2: FAIL_OK
-S15.9.5.25_A1_T2: FAIL_OK
-S15.9.5.26_A1_T2: FAIL_OK
-S15.9.5.27_A1_T2: FAIL_OK
-S15.9.5.28_A1_T2: FAIL_OK
-S15.9.5.29_A1_T2: FAIL_OK
-S15.9.5.2_A1_T2: FAIL_OK
-S15.9.5.30_A1_T2: FAIL_OK
-S15.9.5.31_A1_T2: FAIL_OK
-S15.9.5.32_A1_T2: FAIL_OK
-S15.9.5.33_A1_T2: FAIL_OK
-S15.9.5.34_A1_T2: FAIL_OK
-S15.9.5.35_A1_T2: FAIL_OK
-S15.9.5.36_A1_T2: FAIL_OK
-S15.9.5.37_A1_T2: FAIL_OK
-S15.9.5.38_A1_T2: FAIL_OK
-S15.9.5.39_A1_T2: FAIL_OK
-S15.9.5.3_A1_T2: FAIL_OK
-S15.9.5.40_A1_T2: FAIL_OK
-S15.9.5.41_A1_T2: FAIL_OK
-S15.9.5.42_A1_T2: FAIL_OK
-S15.9.5.4_A1_T2: FAIL_OK
-S15.9.5.5_A1_T2: FAIL_OK
-S15.9.5.6_A1_T2: FAIL_OK
-S15.9.5.7_A1_T2: FAIL_OK
-S15.9.5.8_A1_T2: FAIL_OK
-S15.9.5.9_A1_T2: FAIL_OK
-
-############################# ES3 TESTS ################################
-# These tests check for ES3 semantics, and differ from ES5.
-# When we follow ES5 semantics, it's ok to fail the test.
-
-# Allow keywords as names of properties in object initialisers and
-# in dot-notation property access.
-S11.1.5_A4.1: FAIL_OK
-S11.1.5_A4.2: FAIL_OK
-
-# Calls builtins without an explicit receiver which means that
-# undefined is passed to the builtin. The tests expect the global
-# object to be passed which was true in ES3 but not in ES5.
-S11.1.1_A2: FAIL_OK
-S15.5.4.4_A1_T3: FAIL_OK
-S15.5.4.5_A1_T3: FAIL_OK
-S15.5.4.6_A1_T3: FAIL_OK
-S15.5.4.7_A1_T3: FAIL_OK
-S15.5.4.8_A1_T3: FAIL_OK
-S15.5.4.9_A1_T3: FAIL_OK
-S15.5.4.10_A1_T3: FAIL_OK
-S15.5.4.11_A1_T3: FAIL_OK
-S15.5.4.12_A1_T3: FAIL_OK
-S15.5.4.13_A1_T3: FAIL_OK
-S15.5.4.14_A1_T3: FAIL_OK
-S15.5.4.15_A1_T3: FAIL_OK
-
-# NaN, Infinity and undefined are read-only according to ES5.
-S15.1.1.1_A2_T1: FAIL_OK # NaN
-S15.1.1.1_A2_T2: FAIL_OK # NaN
-S15.1.1.2_A2_T1: FAIL_OK # Infinity
-# S15.1.1.2_A2_T2 would fail if it weren't bogus in r97. sputnik bug #45.
-S15.1.1.3_A2_T1: FAIL_OK # undefined
-S15.1.1.3_A2_T2: FAIL_OK # undefined
-
-# Array.prototype.to[Locale]String is generic in ES5.
-S15.4.4.2_A2_T1: FAIL_OK
-S15.4.4.3_A2_T1: FAIL_OK
-
-######################### UNANALYZED FAILURES ##########################
-
+# Bug? simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Global.undefined)
+11.13.1-4-27-s: FAIL
+# Bug? simple assignment throws TypeError if LeftHandSide is a readonly property
+# in strict mode (Global.Infinity)
+11.13.1-4-3-s: FAIL
+# BUG: Global.NaN is a data property with default attribute values
+15.1.1.1-0: FAIL
+# BUG: Global.Infinity is a data property with default attribute values
+15.1.1.2-0: FAIL
+# BUG: Global.undefined is a data property with default attribute values
+15.1.1.3-0: FAIL
+# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
+# for properties on built-ins (Global.NaN)
+15.2.3.3-4-178: FAIL
+# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
+# for properties on built-ins (Global.Infinity)
+15.2.3.3-4-179: FAIL
+# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
+# for properties on built-ins (Global.undefined)
+15.2.3.3-4-180: FAIL
+# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
+# for properties on built-ins (RegExp.prototype.source)
+# There is no RegExp.prototype.source
+15.2.3.3-4-212: FAIL
+# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
+# for properties on built-ins (RegExp.prototype.global)
+# There is no RegExp.prototype.global
+15.2.3.3-4-213: FAIL
+# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
+# for properties on built-ins (RegExp.prototype.ignoreCase)
+# There is no RegExp.prototype.ignoreCase
+15.2.3.3-4-214: FAIL
+# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
+# for properties on built-ins (RegExp.prototype.multiline)
+15.2.3.3-4-215: FAIL
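The 15.2.3.3-4-21x entries and their "There is no RegExp.prototype.source" notes describe one underlying gap: the tests expect the RegExp flag properties to be reported as data descriptors with writable, enumerable and configurable all false, while on this snapshot Object.getOwnPropertyDescriptor finds no own property at all. A sketch of the check being made:

var desc = Object.getOwnPropertyDescriptor(RegExp.prototype, "source");
// Expected by the tests: a descriptor like { value: ..., writable: false,
//                                            enumerable: false, configurable: false }
// Per the notes above, this build instead has no such own property, so
// getOwnPropertyDescriptor returns undefined.
console.log(desc);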
# Bug? Object.defineProperty - Update [[Enumerable]] attribute of 'name'
# property to true successfully when [[Enumerable]] attribute of 'name'
# is false and [[Configurable]] attribute of 'name' is true, the 'desc'
@@ -387,6 +162,268 @@ S15.4.4.3_A2_T1: FAIL_OK
# generic descriptor which only contains [[Enumerable]] attribute as true,
# 'name' property is an index accessor property (8.12.9 step 8)
15.2.3.6-4-82-24: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, test the length property of 'O'
+# is own data property (15.4.5.1 step 1)
+15.2.3.6-4-116: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, test the length property of 'O'
+# is own data property that overrides an inherited data property (15.4.5.1
+# step 1)
+15.2.3.6-4-117: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test that RangeError exception is thrown when [[Value]] field of
+# 'desc' is undefined (15.4.5.1 step 3.c)
+15.2.3.6-4-125: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is null (15.4.5.1 step 3.c)
+15.2.3.6-4-126: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is a boolean with value false
+# (15.4.5.1 step 3.c)
+15.2.3.6-4-127: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is a boolean with value true
+# (15.4.5.1 step 3.c)
+15.2.3.6-4-128: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is not thrown when the [[Value]] field of
+# 'desc' is 0 (15.4.5.1 step 3.c)
+15.2.3.6-4-129: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is not thrown when the [[Value]] field of
+# 'desc' is +0 (15.4.5.1 step 3.c)
+15.2.3.6-4-130: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is not thrown when the [[Value]] field of
+# 'desc' is -0 (15.4.5.1 step 3.c)
+15.2.3.6-4-131: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is not thrown when the [[Value]] field of
+# 'desc' is a positive number (15.4.5.1 step 3.c)
+15.2.3.6-4-132: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is thrown when the [[Value]] field of
+# 'desc' is a negative number (15.4.5.1 step 3.c)
+15.2.3.6-4-133: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is thrown when the [[Value]] field of
+# 'desc' is +Infinity (15.4.5.1 step 3.c)
+15.2.3.6-4-134: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is thrown when the [[Value]] field of
+# 'desc' is -Infinity (15.4.5.1 step 3.c)
+15.2.3.6-4-135: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is thrown when the [[Value]] field of
+# 'desc' is NaN (15.4.5.1 step 3.c)
+15.2.3.6-4-136: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is not thrown when the [[Value]] field of
+# 'desc' is a string containing a positive number (15.4.5.1 step 3.c)
+15.2.3.6-4-137: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is thrown when the [[Value]] field of
+# 'desc' is a string containing a negative number (15.4.5.1 step 3.c)
+15.2.3.6-4-138: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is thrown when the [[Value]] field of
+# 'desc' is a string containing a decimal number (15.4.5.1 step 3.c)
+15.2.3.6-4-139: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is thrown when the [[Value]] field of
+# 'desc' is a string containing +Infinity (15.4.5.1 step 3.c)
+15.2.3.6-4-140: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is thrown when the [[Value]] field of
+# 'desc' is a string containing -Infinity (15.4.5.1 step 3.c)
+15.2.3.6-4-141: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is a string containing an
+# exponential number (15.4.5.1 step 3.c)
+15.2.3.6-4-142: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is a string containing a hex
+# number (15.4.5.1 step 3.c)
+15.2.3.6-4-143: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is a string containing a number
+# with leading zeros (15.4.5.1 step 3.c)
+15.2.3.6-4-144: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError exception is thrown when the [[Value]] field of
+# 'desc' is a string which doesn't convert to a number (15.4.5.1 step 3.c)
+15.2.3.6-4-145: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is an object which has an own
+# toString method (15.4.5.1 step 3.c)
+15.2.3.6-4-146: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is an Object which has an own
+# valueOf method (15.4.5.1 step 3.c)
+15.2.3.6-4-147: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is an Object which has an own
+# valueOf method that returns an object and toString method that returns a
+# string (15.4.5.1 step 3.c)
+15.2.3.6-4-148: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is an Object which has an own
+# toString and valueOf method (15.4.5.1 step 3.c)
+15.2.3.6-4-149: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test TypeError is thrown when the [[Value]] field of 'desc' is an
+# Object that both toString and valueOf wouldn't return primitive value
+# (15.4.5.1 step 3.c)
+15.2.3.6-4-150: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', and the [[Value]] field of 'desc' is an Object with an own toString
+# method and an inherited valueOf method (15.4.5.1 step 3.c), test that the
+# inherited valueOf method is used
+15.2.3.6-4-151: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError is thrown when the [[Value]] field of 'desc' is a
+# positive non-integer values (15.4.5.1 step 3.c)
+15.2.3.6-4-152: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property
+# of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is a
+# negative non-integer values (15.4.5.1 step 3.c)
+15.2.3.6-4-153: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 2
+# (15.4.5.1 step 3.c)
+15.2.3.6-4-154: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 1
+# (15.4.5.1 step 3.c)
+15.2.3.6-4-155: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
+# boundary value 2^32 (15.4.5.1 step 3.c)
+15.2.3.6-4-156: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
+# boundary value 2^32 + 1 (15.4.5.1 step 3.c)
+15.2.3.6-4-157: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', set the [[Value]] field of 'desc' to a value greater than the
+# existing value of length (15.4.5.1 step 3.f)
+15.2.3.6-4-159: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', set the [[Value]] field of 'desc' to a value lesser than the
+# existing value of length and test that indexes beyond the new length are
+# deleted(15.4.5.1 step 3.f)
+15.2.3.6-4-161: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Writable]] attribute of the length property is set
+# to true after deleting properties with large index named if the
+# [[Writable]] field of 'desc' is absent (15.4.5.1 step 3.h)
+15.2.3.6-4-165: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Writable]] attribute of the length property is set
+# to true after deleting properties with large index named if the
+# [[Writable]] field of 'desc' is true (15.4.5.1 step 3.h)
+15.2.3.6-4-166: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Writable]] attribute of the length property is set
+# to false after deleting properties with large index named if the
+# [[Writable]] field of 'desc' is false (15.4.5.1 step 3.i.ii)
+15.2.3.6-4-167: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', whose writable attribute is being changed to false and the [[Value]]
+# field of 'desc' is less than value of the length property and also lesser
+# than an index of the array which is set to configurable:false, test that
+# new length is set to a value greater than the non-deletable index by 1,
+# writable attribute of length is set to false and TypeError exception is
+# thrown (15.4.5.1 step 3.i.iii)
+15.2.3.6-4-168: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property and also lesser than an index of the array which is set to
+# configurable: false, test that new length is set to a value greater than
+# the non-deletable index by 1, and TypeError is thrown (15.4.5.1 step
+# 3.l.i)
+15.2.3.6-4-169: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property and also lesser than an index of the array which is set to
+# configurable: false, test that new length is set to a value greater than
+# the non-deletable index by 1, writable attribute of length is set to
+# false and TypeError exception is thrown (15.4.5.1 step 3.l.ii)
+15.2.3.6-4-170: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of an inherited data
+# property with large index named in 'O' can't stop deleting index named
+# properties (15.4.5.1 step 3.l.ii)
+15.2.3.6-4-171: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of own data property with
+# large index named in 'O' that overrides an inherited data property can
+# stop deleting index named properties (15.4.5.1 step 3.l.ii)
+15.2.3.6-4-172: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of own data property with
+# large index named in 'O' that overrides an inherited accessor property
+# can stop deleting index named properties (15.4.5.1 step 3.l.ii)
+15.2.3.6-4-173: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of own accessor property
+# with large index named in 'O' can stop deleting index named properties
+# (15.4.5.1 step 3.l.ii)
+15.2.3.6-4-174: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of an inherited accessor
+# property with large index named in 'O' can't stop deleting index named
+# properties (15.4.5.1 step 3.l.ii)
+15.2.3.6-4-175: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of own accessor property
+# with large index named in 'O' that overrides an inherited data property
+# can stop deleting index named properties (15.4.5.1 step 3.l.ii)
+15.2.3.6-4-176: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of own accessor property
+# with large index named in 'O' that overrides an inherited accessor
+# property can stop deleting index named properties (15.4.5.1 step 3.l.ii)
+15.2.3.6-4-177: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the configurable large index named property of 'O' is
+# deleted (15.4.5.1 step 3.l.ii)
+15.2.3.6-4-178: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is greater than value of the length
+# property, test value of the length property is same as [[Value]]
+# (15.4.5.1 step 3.l.iii.1)
+15.2.3.6-4-179-1: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Writable]] attribute of the length property is set
+# to false at last when the [[Writable]] field of 'desc' is false and 'O'
+# doesn't contain non-configurable large index named property (15.4.5.1
+# step 3.m)
+15.2.3.6-4-181: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
+# property, 'name' is boundary value 2^32 - 2 (15.4.5.1 step 4.a)
+15.2.3.6-4-183: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
+# property, test TypeError is thrown if the [[Writable]] attribute of the
+# length property in 'O' is false and value of 'name' equals to value of
+# the length property (15.4.5.1 step 4.b)
+15.2.3.6-4-188: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
+# property, test TypeError is thrown if the [[Writable]] attribute of the
+# length property in 'O' is false and value of 'name' is greater than value
+# of the length property (15.4.5.1 step 4.b)
+15.2.3.6-4-189: FAIL
# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
# property, 'desc' is accessor descriptor, test updating all attribute
# values of 'name' (15.4.5.1 step 4.c)
@@ -405,6 +442,16 @@ S15.4.4.3_A2_T1: FAIL_OK
# property, name is accessor property and 'desc' is accessor descriptor,
# test updating multiple attribute values of 'name' (15.4.5.1 step 4.c)
15.2.3.6-4-273: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
+# property, test the length property of 'O' is set as ToUint32('name') + 1
+# if ToUint32('name') equals to value of the length property in 'O'
+# (15.4.5.1 step 4.e.ii)
+15.2.3.6-4-275: FAIL
+# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
+# property, test the length property of 'O' is set as ToUint32('name') + 1
+# if ToUint32('name') is greater than value of the length property in 'O'
+# (15.4.5.1 step 4.e.ii)
+15.2.3.6-4-276: FAIL
# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
# formal parameters, 'name' is own accessor property of 'O' which is also
# defined in [[ParameterMap]] of 'O', and 'desc' is accessor descriptor,
@@ -519,6 +566,269 @@ S15.4.4.3_A2_T1: FAIL_OK
15.2.3.6-4-623: FAIL
# Bug? ES5 Attributes - all attributes in Date.prototype.toJSON are correct
15.2.3.6-4-624: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, test the length property of
+# 'O' is own data property (15.4.5.1 step 1)
+15.2.3.7-6-a-112: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, test the length property of
+# 'O' is own data property that overrides an inherited data property
+# (15.4.5.1 step 1)
+15.2.3.7-6-a-113: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', test RangeError is thrown when setting the [[Value]] field of 'desc'
+# to undefined (15.4.5.1 step 3.c)
+15.2.3.7-6-a-121: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', test setting the [[Value]] field of 'desc' to null is actually set to
+# 0 (15.4.5.1 step 3.c)
+15.2.3.7-6-a-122: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is a boolean with value false
+# (15.4.5.1 step 3.c)
+15.2.3.7-6-a-123: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is a boolean with value true
+# (15.4.5.1 step 3.c)
+15.2.3.7-6-a-124: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is 0 (15.4.5.1 step 3.c)
+15.2.3.7-6-a-125: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is +0 (15.4.5.1 step 3.c)
+15.2.3.7-6-a-126: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is -0 (15.4.5.1 step 3.c)
+15.2.3.7-6-a-127: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is positive number (15.4.5.1
+# step 3.c)
+15.2.3.7-6-a-128: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is negative number (15.4.5.1
+# step 3.c)
+15.2.3.7-6-a-129: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is +Infinity (15.4.5.1 step
+# 3.c)
+15.2.3.7-6-a-130: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is -Infinity (15.4.5.1 step
+# 3.c)
+15.2.3.7-6-a-131: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is NaN (15.4.5.1 step 3.c)
+15.2.3.7-6-a-132: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is a string containing a
+# positive number (15.4.5.1 step 3.c)
+15.2.3.7-6-a-133: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is a string containing a
+# negative number (15.4.5.1 step 3.c)
+15.2.3.7-6-a-134: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is a string containing a
+# decimal number (15.4.5.1 step 3.c)
+15.2.3.7-6-a-135: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is a string containing
+# +Infinity (15.4.5.1 step 3.c)
+15.2.3.7-6-a-136: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is a string containing
+# -Infinity (15.4.5.1 step 3.c)
+15.2.3.7-6-a-137: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is a string containing an
+# exponential number (15.4.5.1 step 3.c)
+15.2.3.7-6-a-138: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is a string containing a hex
+# number (15.4.5.1 step 3.c)
+15.2.3.7-6-a-139: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is a string containing a
+# leading zero number (15.4.5.1 step 3.c)
+15.2.3.7-6-a-140: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', test the [[Value]] field of 'desc' is a string which doesn't convert
+# to a number (15.4.5.1 step 3.c)
+15.2.3.7-6-a-141: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', test the [[Value]] field of 'desc' is an Object which has an own
+# toString method (15.4.5.1 step 3.c)
+15.2.3.7-6-a-142: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is an Object which has an own
+# valueOf method (15.4.5.1 step 3.c)
+15.2.3.7-6-a-143: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is an Object which has an own
+# valueOf method that returns an object and toString method that returns a
+# string (15.4.5.1 step 3.c)
+15.2.3.7-6-a-144: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is an Object which has an own
+# toString and valueOf method (15.4.5.1 step 3.c)
+15.2.3.7-6-a-145: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test TypeError is thrown when the [[Value]] field of 'desc' is an
+# Object that both toString and valueOf wouldn't return primitive value
+# (15.4.5.1 step 3.c)
+15.2.3.7-6-a-146: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test using inherited valueOf method when the [[Value]] field of
+# 'desc' is an Object with an own toString and inherited valueOf methods
+# (15.4.5.1 step 3.c)
+15.2.3.7-6-a-147: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
+# positive non-integer values (15.4.5.1 step 3.c)
+15.2.3.7-6-a-148: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
+# negative non-integer values (15.4.5.1 step 3.c)
+15.2.3.7-6-a-149: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 2
+# (15.4.5.1 step 3.c)
+15.2.3.7-6-a-150: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 1
+# (15.4.5.1 step 3.c)
+15.2.3.7-6-a-151: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
+# boundary value 2^32 (15.4.5.1 step 3.c)
+15.2.3.7-6-a-152: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
+# of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
+# boundary value 2^32 + 1 (15.4.5.1 step 3.c)
+15.2.3.7-6-a-153: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', test the [[Value]] field of 'desc' which is greater than value of
+# the length property is defined into 'O' without deleting any property
+# with large index named (15.4.5.1 step 3.f)
+15.2.3.7-6-a-155: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', test the [[Value]] field of 'desc' which is less than value of the
+# length property is defined into 'O' with deleting properties with large
+# index named (15.4.5.1 step 3.f)
+15.2.3.7-6-a-157: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Writable]] attribute of the length property is set
+# to true at last after deleting properties with large index named if the
+# [[Writable]] field of 'desc' is absent (15.4.5.1 step 3.h)
+15.2.3.7-6-a-161: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Writable]] attribute of the length property is set
+# to true at last after deleting properties with large index named if the
+# [[Writable]] field of 'desc' is true (15.4.5.1 step 3.h)
+15.2.3.7-6-a-162: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Writable]] attribute of the length property is set
+# to false at last after deleting properties with large index named if the
+# [[Writable]] field of 'desc' is false (15.4.5.1 step 3.i.ii)
+15.2.3.7-6-a-163: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Writable]] attribute of the length property in 'O'
+# is set as true before deleting properties with large index named
+# (15.4.5.1 step 3.i.iii)
+15.2.3.7-6-a-164: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the length property is decreased by 1 (15.4.5.1 step
+# 3.l.i)
+15.2.3.7-6-a-165: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of own data property with
+# large index named in 'O' can stop deleting index named properties
+# (15.4.5.1 step 3.l.ii)
+15.2.3.7-6-a-166: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of inherited data property
+# with large index named in 'O' can't stop deleting index named properties
+# (15.4.5.1 step 3.l.ii)
+15.2.3.7-6-a-167: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of own data property with
+# large index named in 'O' that overrides inherited data property can stop
+# deleting index named properties (15.4.5.1 step 3.l.ii)
+15.2.3.7-6-a-168: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of own data property with
+# large index named in 'O' that overrides inherited accessor property can
+# stop deleting index named properties (15.4.5.1 step 3.l.ii)
+15.2.3.7-6-a-169: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of own accessor property
+# with large index named in 'O' can stop deleting index named properties
+# (15.4.5.1 step 3.l.ii)
+15.2.3.7-6-a-170: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of inherited accessor
+# property with large index named in 'O' can't stop deleting index named
+# properties (15.4.5.1 step 3.l.ii)
+15.2.3.7-6-a-171: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of own accessor property
+# with large index named in 'O' that overrides inherited data property can
+# stop deleting index named properties (15.4.5.1 step 3.l.ii)
+15.2.3.7-6-a-172: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Configurable]] attribute of own accessor property
+# with large index named in 'O' that overrides inherited accessor property
+# can stop deleting index named properties (15.4.5.1 step 3.l.ii)
+15.2.3.7-6-a-173: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the configurable large index named property of 'O' can be
+# deleted (15.4.5.1 step 3.l.ii)
+15.2.3.7-6-a-174: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test value of the length property is set to the last
+# non-configurable index named property of 'O' plus 1 (15.4.5.1 step
+# 3.l.iii.1)
+15.2.3.7-6-a-175: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Writable]] attribute of the length property is set
+# to false at last when the [[Writable]] field of 'desc' is false and 'O'
+# contains non-configurable large index named property (15.4.5.1 step
+# 3.l.iii.2)
+15.2.3.7-6-a-176: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
+# 'O', the [[Value]] field of 'desc' is less than value of the length
+# property, test the [[Writable]] attribute of the length property is set
+# to false at last when the [[Writable]] field of 'desc' is false and 'O'
+# doesn't contain non-configurable large index named property (15.4.5.1
+# step 3.m)
+15.2.3.7-6-a-177: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
+# property, 'P' is boundary value 2^32 - 2 (15.4.5.1 step 4.a)
+15.2.3.7-6-a-179: FAIL
+# Bug? Object.defineProperties - TypeError is thrown if 'O' is an Array, 'P' is
+# an array index named property,[[Writable]] attribute of the length
+# property in 'O' is false, value of 'P' is equal to value of the length
+# property in 'O' (15.4.5.1 step 4.b)
+15.2.3.7-6-a-184: FAIL
+# Bug? Object.defineProperties - TypeError is thrown if 'O' is an Array, 'P' is
+# an array index named property,[[Writable]] attribute of the length
+# property in 'O' is false, value of 'P' is bigger than value of the length
+# property in 'O' (15.4.5.1 step 4.b)
+15.2.3.7-6-a-185: FAIL
# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
# property, 'desc' is accessor descriptor, test updating all attribute
# values of 'P' (15.4.5.1 step 4.c)
@@ -538,6 +848,16 @@ S15.4.4.3_A2_T1: FAIL_OK
# accessor descriptor, test updating multiple attribute values of 'P'
# (15.4.5.1 step 4.c)
15.2.3.7-6-a-262: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
+# property, test the length property of 'O' is set as ToUint32('P') + 1 if
+# ToUint32('P') equals to value of the length property in 'O' (15.4.5.1
+# step 4.e.ii)
+15.2.3.7-6-a-264: FAIL
+# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
+# property, test the length property of 'O' is set as ToUint32('P') + 1 if
+# ToUint32('P') is greater than value of the length property in 'O'
+# (15.4.5.1 step 4.e.ii)
+15.2.3.7-6-a-265: FAIL
# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own accessor
# property of 'O' which is also defined in [[ParameterMap]] of 'O', and
# 'desc' is accessor descriptor, test updating multiple attribute values of
@@ -600,21 +920,239 @@ S15.4.4.3_A2_T1: FAIL_OK
# Bug? Array.prototype.indexOf - decreasing length of array does not delete
# non-configurable properties
15.4.4.14-9-a-19: FAIL
+# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
+# property that overrides an inherited data property on an Array
+15.4.4.14-9-b-i-11: FAIL
+# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
+# property that overrides an inherited accessor property on an Array
+15.4.4.14-9-b-i-13: FAIL
+# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
+# property without a get function on an Array
+15.4.4.14-9-b-i-17: FAIL
+# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
+# property without a get function that overrides an inherited accessor
+# property on an Array
+15.4.4.14-9-b-i-19: FAIL
+# Bug? Array.prototype.indexOf - side-effects are visible in subsequent
+# iterations on an Array
+15.4.4.14-9-b-i-28: FAIL
+# Bug? Array.prototype.indexOf - terminates iteration on unhandled exception on
+# an Array
+15.4.4.14-9-b-i-30: FAIL
+# Bug? Array.prototype.lastIndexOf - deleting property of prototype causes
+# prototype index property not to be visited on an Array
+15.4.4.15-8-a-14: FAIL
# Bug? Array.prototype.lastIndexOf - decreasing length of array does not delete
# non-configurable properties
15.4.4.15-8-a-19: FAIL
+# Bug? Array.prototype.lastIndexOf - element to be retrieved is own accessor
+# property that overrides an inherited data property on an Array
+15.4.4.15-8-b-i-11: FAIL
+# Bug? Array.prototype.lastIndexOf - element to be retrieved is own accessor
+# property that overrides an inherited accessor property on an Array
+15.4.4.15-8-b-i-13: FAIL
+# Bug? Array.prototype.lastIndexOf - element to be retrieved is own accessor
+# property without a get function on an Array
+15.4.4.15-8-b-i-17: FAIL
+# Bug? Array.prototype.lastIndexOf - side-effects are visible in subsequent
+# iterations on an Array
+15.4.4.15-8-b-i-28: FAIL
+# Bug? Array.prototype.lastIndexOf terminates iteration on unhandled exception
+# on an Array
+15.4.4.15-8-b-i-30: FAIL
+# Bug? Array.prototype.every applied to boolean primitive
+15.4.4.16-1-3: FAIL
+# Bug? Array.prototype.every applied to number primitive
+15.4.4.16-1-5: FAIL
+# Bug? Array.prototype.every applied to string primitive
+15.4.4.16-1-7: FAIL
+# Bug? Array.prototype.every - side effects produced by step 2 are visible when
+# an exception occurs
+15.4.4.16-4-8: FAIL
+# Bug? Array.prototype.every - side effects produced by step 3 are visible when
+# an exception occurs
+15.4.4.16-4-9: FAIL
+# Bug? Array.prototype.every - the exception is not thrown if exception was
+# thrown by step 2
+15.4.4.16-4-10: FAIL
+# Bug? Array.prototype.every - the exception is not thrown if exception was
+# thrown by step 3
+15.4.4.16-4-11: FAIL
+# Bug? Array.prototype.every - calling with no callbackfn is the same as passing
+# undefined for callbackfn
+15.4.4.16-4-15: FAIL
# Bug? Array.prototype.every - decreasing length of array does not delete
# non-configurable properties
15.4.4.16-7-b-16: FAIL
+# Bug? Array.prototype.every - element to be retrieved is own accessor property
+# on an Array
+15.4.4.16-7-c-i-10: FAIL
+# Bug? Array.prototype.every - element to be retrieved is own accessor property
+# that overrides an inherited data property on an Array
+15.4.4.16-7-c-i-12: FAIL
+# Bug? Array.prototype.every - element to be retrieved is own accessor property
+# that overrides an inherited accessor property on an Array
+15.4.4.16-7-c-i-14: FAIL
+# Bug? Array.prototype.every - element to be retrieved is own accessor property
+# without a get function on an Array
+15.4.4.16-7-c-i-18: FAIL
+# Bug? Array.prototype.every - element to be retrieved is own accessor property
+# without a get function that overrides an inherited accessor property on
+# an Array
+15.4.4.16-7-c-i-20: FAIL
+# Bug? Array.prototype.every - element changed by getter on previous iterations
+# is observed on an Array
+15.4.4.16-7-c-i-28: FAIL
+# Bug? Array.prototype.some applied to boolean primitive
+15.4.4.17-1-3: FAIL
+# Bug? Array.prototype.some applied to number primitive
+15.4.4.17-1-5: FAIL
+# Bug? Array.prototype.some applied to string primitive
+15.4.4.17-1-7: FAIL
+# Bug? Array.prototype.some - side effects produced by step 2 are visible when
+# an exception occurs
+15.4.4.17-4-8: FAIL
+# Bug? Array.prototype.some - side effects produced by step 3 are visible when
+# an exception occurs
+15.4.4.17-4-9: FAIL
+# Bug? Array.prototype.some - the exception is not thrown if exception was
+# thrown by step 2
+15.4.4.17-4-10: FAIL
+# Bug? Array.prototype.some - the exception is not thrown if exception was
+# thrown by step 3
+15.4.4.17-4-11: FAIL
+# Bug? Array.prototype.some - calling with no callbackfn is the same as passing
+# undefined for callbackfn
+15.4.4.17-4-15: FAIL
# Bug? Array.prototype.some - decreasing length of array does not delete
# non-configurable properties
15.4.4.17-7-b-16: FAIL
+# Bug? Array.prototype.some - element to be retrieved is own accessor property
+# on an Array
+15.4.4.17-7-c-i-10: FAIL
+# Bug? Array.prototype.some - element to be retrieved is own accessor property
+# that overrides an inherited data property on an Array
+15.4.4.17-7-c-i-12: FAIL
+# Bug? Array.prototype.some - element to be retrieved is own accessor property
+# that overrides an inherited accessor property on an Array
+15.4.4.17-7-c-i-14: FAIL
+# Bug? Array.prototype.some - element to be retrieved is own accessor property
+# without a get function on an Array
+15.4.4.17-7-c-i-18: FAIL
+# Bug? Array.prototype.some - element to be retrieved is own accessor property
+# without a get function that overrides an inherited accessor property on
+# an Array
+15.4.4.17-7-c-i-20: FAIL
+# Bug? Array.prototype.some - element changed by getter on previous iterations
+# is observed on an Array
+15.4.4.17-7-c-i-28: FAIL
+# Bug? Array.prototype.forEach applied to boolean primitive
+15.4.4.18-1-3: FAIL
+# Bug? Array.prototype.forEach applied to number primitive
+15.4.4.18-1-5: FAIL
+# Bug? Array.prototype.forEach applied to string primitive
+15.4.4.18-1-7: FAIL
+# Bug? Array.prototype.forEach - side effects produced by step 2 are visible
+# when an exception occurs
+15.4.4.18-4-8: FAIL
+# Bug? Array.prototype.forEach - side effects produced by step 3 are visible
+# when an exception occurs
+15.4.4.18-4-9: FAIL
+# Bug? Array.prototype.forEach - the exception is not thrown if exception was
+# thrown by step 2
+15.4.4.18-4-10: FAIL
+# Bug? Array.prototype.forEach - the exception is not thrown if exception was
+# thrown by step 3
+15.4.4.18-4-11: FAIL
+# Bug? Array.prototype.forEach - calling with no callbackfn is the same as
+# passing undefined for callbackfn
+15.4.4.18-4-15: FAIL
# Bug? Array.prototype.forEach - decreasing length of array does not delete
# non-configurable properties
15.4.4.18-7-b-16: FAIL
+# Bug? Array.prototype.forEach - element to be retrieved is own accessor
+# property on an Array
+15.4.4.18-7-c-i-10: FAIL
+# Bug? Array.prototype.forEach - element to be retrieved is own accessor
+# property that overrides an inherited data property on an Array
+15.4.4.18-7-c-i-12: FAIL
+# Bug? Array.prototype.forEach - element to be retrieved is own accessor
+# property that overrides an inherited accessor property on an Array
+15.4.4.18-7-c-i-14: FAIL
+# Bug? Array.prototype.forEach - element to be retrieved is own accessor
+# property without a get function on an Array
+15.4.4.18-7-c-i-18: FAIL
+# Bug? Array.prototype.forEach - element to be retrieved is own accessor
+# property without a get function that overrides an inherited accessor
+# property on an Array
+15.4.4.18-7-c-i-20: FAIL
+# Bug? Array.prototype.forEach - element changed by getter on previous
+# iterations is observed on an Array
+15.4.4.18-7-c-i-28: FAIL
+# Bug? Array.prototype.map - applied to boolean primitive
+15.4.4.19-1-3: FAIL
+# Bug? Array.prototype.map - applied to number primitive
+15.4.4.19-1-5: FAIL
+# Bug? Array.prototype.map - applied to string primitive
+15.4.4.19-1-7: FAIL
+# Bug? Array.prototype.map - Side effects produced by step 2 are visible when an
+# exception occurs
+15.4.4.19-4-8: FAIL
+# Bug? Array.prototype.map - Side effects produced by step 3 are visible when an
+# exception occurs
+15.4.4.19-4-9: FAIL
+# Bug? Array.prototype.map - the exception is not thrown if exception was thrown
+# by step 2
+15.4.4.19-4-10: FAIL
+# Bug? Array.prototype.map - the exception is not thrown if exception was thrown
+# by step 3
+15.4.4.19-4-11: FAIL
+# Bug? Array.prototype.map - calling with no callbackfn is the same as passing
+# undefined for callbackfn
+15.4.4.19-4-15: FAIL
# Bug? Array.prototype.map - decreasing length of array does not delete
# non-configurable properties
15.4.4.19-8-b-16: FAIL
+# Bug? Array.prototype.map - element to be retrieved is own accessor property on
+# an Array
+15.4.4.19-8-c-i-10: FAIL
+# Bug? Array.prototype.map - element to be retrieved is own accessor property
+# that overrides an inherited data property on an Array
+15.4.4.19-8-c-i-12: FAIL
+# Bug? Array.prototype.map - element to be retrieved is own accessor property
+# that overrides an inherited accessor property on an Array
+15.4.4.19-8-c-i-14: FAIL
+# Bug? Array.prototype.map - element to be retrieved is own accessor property
+# without a get function on an Array
+15.4.4.19-8-c-i-18: FAIL
+# Bug? Array.prototype.map - element to be retrieved is own accessor property
+# without a get function that overrides an inherited accessor property on
+# an Array
+15.4.4.19-8-c-i-19: FAIL
+# Bug? Array.prototype.map - element changed by getter on previous iterations is
+# observed on an Array
+15.4.4.19-8-c-i-28: FAIL
+# Bug? Array.prototype.filter applied to boolean primitive
+15.4.4.20-1-3: FAIL
+# Bug? Array.prototype.filter applied to number primitive
+15.4.4.20-1-5: FAIL
+# Bug? Array.prototype.filter applied to string primitive
+15.4.4.20-1-7: FAIL
+# Bug? Array.prototype.filter - side effects produced by step 2 are visible when
+# an exception occurs
+15.4.4.20-4-8: FAIL
+# Bug? Array.prototype.filter - side effects produced by step 3 are visible when
+# an exception occurs
+15.4.4.20-4-9: FAIL
+# Bug? Array.prototype.filter - the exception is not thrown if exception was
+# thrown by step 2
+15.4.4.20-4-10: FAIL
+# Bug? Array.prototype.filter - the exception is not thrown if exception was
+# thrown by step 3
+15.4.4.20-4-11: FAIL
+# Bug? Array.prototype.filter - calling with no callbackfn is the same as
+# passing undefined for callbackfn
+15.4.4.20-4-15: FAIL
# Bug? Array.prototype.filter - properties can be added to prototype after
# current position are visited on an Array-like object
15.4.4.20-9-b-6: FAIL
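
Most of the 15.4.4.14 through 15.4.4.20 failures listed above share one theme:
the iteration methods must fetch elements with [[Get]], so own accessor (getter)
properties on the Array have to be honoured, and a missing callbackfn must
behave exactly like an explicit undefined. A minimal sketch of both points
(hypothetical example, not lifted from the suite):

    var a = [1, , 3];                            // hole at index 1
    Object.defineProperty(a, '1', {
      get: function () { return 2; },            // own accessor property on the Array
      enumerable: true,
      configurable: true
    });

    // The getter's value must be observed by the iteration methods:
    // a.indexOf(2) === 1
    // a.every(function (v) { return v > 0; }) === true
    // a.map(function (v) { return v * 10; })[1] === 20

    // And a missing callbackfn is treated as undefined, so this throws a
    // TypeError just like [].forEach(undefined) does:
    // [].forEach();
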
@@ -625,29 +1163,144 @@ S15.4.4.3_A2_T1: FAIL_OK
# that overrides an inherited accessor property on an Array
15.4.4.20-9-c-i-6: FAIL
# Bug? Array.prototype.filter - element to be retrieved is own accessor property
+# on an Array
+15.4.4.20-9-c-i-10: FAIL
+# Bug? Array.prototype.filter - element to be retrieved is own accessor property
+# that overrides an inherited data property on an Array
+15.4.4.20-9-c-i-12: FAIL
+# Bug? Array.prototype.filter - element to be retrieved is own accessor property
# that overrides an inherited accessor property on an Array
15.4.4.20-9-c-i-14: FAIL
# Bug? Array.prototype.filter - element to be retrieved is inherited accessor
# property on an Array
15.4.4.20-9-c-i-16: FAIL
+# Bug? Array.prototype.filter - element to be retrieved is own accessor property
+# without a get function on an Array
+15.4.4.20-9-c-i-18: FAIL
+# Bug? Array.prototype.filter - element to be retrieved is own accessor property
+# without a get function that overrides an inherited accessor property on
+# an Array
+15.4.4.20-9-c-i-20: FAIL
# Bug? Array.prototype.filter - element to be retrieved is inherited accessor
# property without a get function on an Array
15.4.4.20-9-c-i-22: FAIL
+# Bug? Array.prototype.filter - element changed by getter on previous iterations
+# is observed on an Array
+15.4.4.20-9-c-i-28: FAIL
+# Bug? Array.prototype.reduce applied to boolean primitive
+15.4.4.21-1-3: FAIL
+# Bug? Array.prototype.reduce applied to number primitive
+15.4.4.21-1-5: FAIL
+# Bug? Array.prototype.reduce applied to string primitive
+15.4.4.21-1-7: FAIL
+# Bug? Array.prototype.reduce - side effects produced by step 2 are visible when
+# an exception occurs
+15.4.4.21-4-8: FAIL
+# Bug? Array.prototype.reduce - side effects produced by step 3 are visible when
+# an exception occurs
+15.4.4.21-4-9: FAIL
+# Bug? Array.prototype.reduce - the exception is not thrown if exception was
+# thrown by step 2
+15.4.4.21-4-10: FAIL
+# Bug? Array.prototype.reduce - the exception is not thrown if exception was
+# thrown by step 3
+15.4.4.21-4-11: FAIL
+# Bug? Array.prototype.reduce - calling with no callbackfn is the same as
+# passing undefined for callbackfn
+15.4.4.21-4-15: FAIL
# Bug? Array.prototype.reduce - decreasing length of array in step 8 does not
# delete non-configurable properties
15.4.4.21-9-b-16: FAIL
# Bug? Array.prototype.reduce - decreasing length of array does not delete
# non-configurable properties
15.4.4.21-9-b-29: FAIL
+# Bug? Array.prototype.reduceRight applied to boolean primitive
+15.4.4.22-1-3: FAIL
+# Bug? Array.prototype.reduceRight applied to number primitive
+15.4.4.22-1-5: FAIL
+# Bug? Array.prototype.reduceRight applied to string primitive
+15.4.4.22-1-7: FAIL
+# Bug? Array.prototype.reduceRight - side effects produced by step 2 are visible
+# when an exception occurs
+15.4.4.22-4-8: FAIL
+# Bug? Array.prototype.reduceRight - side effects produced by step 3 are visible
+# when an exception occurs
+15.4.4.22-4-9: FAIL
+# Bug? Array.prototype.reduceRight - the exception is not thrown if exception
+# was thrown by step 2
+15.4.4.22-4-10: FAIL
+# Bug? Array.prototype.reduceRight - the exception is not thrown if exception
+# was thrown by step 3
+15.4.4.22-4-11: FAIL
+# Bug? Array.prototype.reduceRight - calling with no callbackfn is the same as
+# passing undefined for callbackfn
+15.4.4.22-4-15: FAIL
+# Bug? Array.prototype.reduceRight - element to be retrieved is own accessor
+# property that overrides an inherited data property on an Array
+15.4.4.22-8-b-iii-1-12: FAIL
+# Bug? Array.prototype.reduceRight - element to be retrieved is own accessor
+# property without a get function on an Array
+15.4.4.22-8-b-iii-1-18: FAIL
+# Bug? Array.prototype.reduceRight - element to be retrieved is own accessor
+# property without a get function that overrides an inherited accessor
+# property on an Array
+15.4.4.22-8-b-iii-1-20: FAIL
+# Bug? Array.prototype.reduceRight - element changed by getter on current
+# iteration is observed in subsequent iterations on an Array
+15.4.4.22-8-b-iii-1-30: FAIL
+# Bug? Array.prototype.reduceRight - Exception in getter terminates iteration on
+# an Array
+15.4.4.22-8-b-iii-1-33: FAIL
+# Bug? Array.prototype.reduceRight - modifications to length don't change number
+# of iterations in step 9
+15.4.4.22-8-b-2: FAIL
+# Bug? Array.prototype.reduceRight - deleting own property in step 8 causes
+# deleted index property not to be visited on an Array
+15.4.4.22-9-b-9: FAIL
+# Bug? Array.prototype.reduceRight - deleting own property with prototype
+# property in step 8 causes prototype index property to be visited on an
+# Array
+15.4.4.22-9-b-13: FAIL
# Bug? Array.prototype.reduceRight - decreasing length of array in step 8 does
# not delete non-configurable properties
15.4.4.22-9-b-16: FAIL
+# Bug? Array.prototype.reduceRight - deleting property of prototype causes
+# deleted index property not to be visited on an Array
+15.4.4.22-9-b-24: FAIL
+# Bug? Array.prototype.reduceRight - deleting own property with prototype
+# property causes prototype index property to be visited on an Array
+15.4.4.22-9-b-26: FAIL
# Bug? Array.prototype.reduceRight - decreasing length of array does not delete
# non-configurable properties
15.4.4.22-9-b-29: FAIL
+# Bug? Array.prototype.reduceRight - element changed by getter on previous
+# iterations is observed on an Array
+15.4.4.22-9-c-i-30: FAIL
# Bug? Array.prototype.reduceRight - modifications to length will change number
# of iterations
15.4.4.22-9-9: FAIL
+# Bug? String.prototype.trim - 'S' is a string with all WhiteSpace
+15.5.4.20-3-2: FAIL
+# Bug? String.prototype.trim - 'S' is a string consisting of the union of all
+# WhiteSpace and LineTerminator
+15.5.4.20-3-3: FAIL
+# Bug? String.prototype.trim - 'S' is a string starting with the union of all
+# LineTerminator and all WhiteSpace
+15.5.4.20-3-4: FAIL
+# Bug? String.prototype.trim - 'S' is a string ending with the union of all
+# LineTerminator and all WhiteSpace
+15.5.4.20-3-5: FAIL
+# Bug? String.prototype.trim - 'S' is a string starting with the union of all
+# LineTerminator and all WhiteSpace and ending with the union of all
+# LineTerminator and all WhiteSpace
+15.5.4.20-3-6: FAIL
+# Bug? String.prototype.trim handles whitespace and line terminators (\\uFEFFabc)
+15.5.4.20-4-10: FAIL
+# Bug? String.prototype.trim handles whitespace and line terminators (abc\\uFEFF)
+15.5.4.20-4-18: FAIL
+# Bug? String.prototype.trim handles whitespace and line terminators
+# (\\uFEFF\\uFEFF)
+15.5.4.20-4-34: FAIL
# Bug? Date.prototype.toISOString - RangeError is thrown when value of date is
# Date(1970, 0, -99999999, 0, 0, 0, -1), the time zone is UTC(0)
15.9.5.43-0-8: FAIL
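
The String.prototype.trim entries above hinge on \uFEFF (the byte order mark)
counting as WhiteSpace in ES5, alongside the LineTerminator characters. A quick
sketch of what the failing tests expect (hypothetical example):

    '\uFEFFabc'.trim();                      // expected: 'abc'
    'abc\uFEFF'.trim();                      // expected: 'abc'
    '\uFEFF\uFEFF'.trim();                   // expected: ''

    // A string made up only of WhiteSpace and LineTerminator characters
    // trims down to the empty string:
    ' \t\u00A0\r\n\u2028\u2029'.trim();      // expected: ''
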
@@ -666,31 +1319,36 @@ S15.4.4.3_A2_T1: FAIL_OK
# Bug? Date.prototype.toISOString - value of year is Infinity
# Date.prototype.toISOString throw the RangeError
15.9.5.43-0-15: FAIL
+# Bug? RegExp - the thrown error is SyntaxError instead of RegExpError when 'F'
+# contains any character other than 'g', 'i', or 'm'
+15.10.4.1-3: FAIL
+# Bug? RegExp.prototype is itself a RegExp
+15.10.6: FAIL
+# Bug? RegExp.prototype.source is of type String
+15.10.7.1-1: FAIL
+# Bug? RegExp.prototype.source is a data property with default attribute values
+# (false)
+15.10.7.1-2: FAIL
+# Bug? RegExp.prototype.global is of type Boolean
+15.10.7.2-1: FAIL
+# Bug? RegExp.prototype.global is a data property with default attribute values
+# (false)
+15.10.7.2-2: FAIL
+# Bug? RegExp.prototype.ignoreCase is of type Boolean
+15.10.7.3-1: FAIL
+# Bug? RegExp.prototype.ignoreCase is a data property with default attribute
+# values (false)
+15.10.7.3-2: FAIL
+# Bug? RegExp.prototype.multiline is of type Boolean
+15.10.7.4-1: FAIL
+# Bug? RegExp.prototype.multiline is a data property with default attribute
+# values (false)
+15.10.7.4-2: FAIL
+# Bug? RegExp.prototype.lastIndex is of type Number
+15.10.7.5-1: FAIL
+# Bug? RegExp.prototype.lastIndex is a data property with specified attribute
+# values
+15.10.7.5-2: FAIL
# Bug? Error.prototype.toString return the value of 'msg' when 'name' is empty
# string and 'msg' isn't undefined
15.11.4.4-8-1: FAIL
-
-############################ SKIPPED TESTS #############################
-
-# These tests take a looong time to run in debug mode.
-S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug
-S15.1.3.1_A2.5_T1: PASS, SKIP if $mode == debug
-
-[ $arch == arm ]
-
-# BUG(3251225): Tests that timeout with --nocrankshaft.
-S15.1.3.1_A2.5_T1: SKIP
-S15.1.3.2_A2.5_T1: SKIP
-S15.1.3.1_A2.4_T1: SKIP
-S15.1.3.1_A2.5_T1: SKIP
-S15.1.3.2_A2.4_T1: SKIP
-S15.1.3.2_A2.5_T1: SKIP
-S15.1.3.3_A2.3_T1: SKIP
-S15.1.3.4_A2.3_T1: SKIP
-S15.1.3.1_A2.5_T1: SKIP
-S15.1.3.2_A2.5_T1: SKIP
-
-[ $arch == mips ]
-
-# Skip all tests on MIPS.
-*: SKIP
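
The RegExp entries above reduce to a few ES5-era invariants: RegExp.prototype is
itself a RegExp object, source/global/ignoreCase/multiline are non-writable,
non-enumerable, non-configurable data properties, and lastIndex is writable. A
rough check of what those tests expect (hypothetical example, not the test code;
later editions of the spec turned some of these into accessors):

    // [[Class]] of RegExp.prototype is "RegExp" under ES5:
    Object.prototype.toString.call(RegExp.prototype);   // expected: '[object RegExp]'

    var re = /abc/g;
    var src = Object.getOwnPropertyDescriptor(re, 'source');
    // expected: typeof re.source === 'string' and
    //           src.writable === false, src.enumerable === false,
    //           src.configurable === false

    var li = Object.getOwnPropertyDescriptor(re, 'lastIndex');
    // expected: typeof re.lastIndex === 'number' and
    //           li.writable === true, li.enumerable === false,
    //           li.configurable === false
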
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 52127cdd2f..9482046034 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -43,10 +43,10 @@ class Test262TestCase(test.TestCase):
self.root = root
def IsNegative(self):
- return '@negative' in self.GetSource()
+ return self.filename.endswith('-n.js')
def GetLabel(self):
- return "%s test262 %s" % (self.mode, self.GetName())
+ return "%s test262 %s %s" % (self.mode, self.GetGroup(), self.GetName())
def IsFailureOutput(self, output):
if output.exit_code != 0:
@@ -63,6 +63,9 @@ class Test262TestCase(test.TestCase):
def GetName(self):
return self.path[-1]
+ def GetGroup(self):
+ return self.path[0]
+
def GetSource(self):
return open(self.filename).read()
@@ -72,14 +75,13 @@ class Test262TestConfiguration(test.TestConfiguration):
def __init__(self, context, root):
super(Test262TestConfiguration, self).__init__(context, root)
- def ListTests(self, current_path, path, mode, variant_flags):
- testroot = join(self.root, 'data', 'test', 'suite')
+ def AddIETestCenter(self, tests, current_path, path, mode):
+ current_root = join(self.root, 'data', 'test', 'suite', 'ietestcenter')
harness = [join(self.root, 'data', 'test', 'harness', f)
for f in TEST_262_HARNESS]
harness += [join(self.root, 'harness-adapt.js')]
- tests = []
- for root, dirs, files in os.walk(testroot):
- for dotted in [x for x in dirs if x.startswith('.')]:
+ for root, dirs, files in os.walk(current_root):
+ for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
dirs.sort()
root_path = root[len(self.root):].split(os.path.sep)
@@ -87,11 +89,25 @@ class Test262TestConfiguration(test.TestConfiguration):
files.sort()
for file in files:
if file.endswith('.js'):
- test_path = ['test262', file[:-3]]
- if self.Contains(path, test_path):
+ if self.Contains(path, root_path):
+ test_path = ['ietestcenter', file[:-3]]
test = Test262TestCase(join(root, file), test_path, self.context,
self.root, mode, harness)
tests.append(test)
+
+ def AddSputnikConvertedTests(self, tests, current_path, path, mode):
+ # To be enabled
+ pass
+
+ def AddSputnikTests(self, tests, current_path, path, mode):
+ # To be enabled
+ pass
+
+ def ListTests(self, current_path, path, mode, variant_flags):
+ tests = []
+ self.AddIETestCenter(tests, current_path, path, mode)
+ self.AddSputnikConvertedTests(tests, current_path, path, mode)
+ self.AddSputnikTests(tests, current_path, path, mode)
return tests
def GetBuildRequirements(self):
diff --git a/deps/v8/tools/gc-nvp-trace-processor.py b/deps/v8/tools/gc-nvp-trace-processor.py
index de3dc90bd4..511ab2bcdf 100755
--- a/deps/v8/tools/gc-nvp-trace-processor.py
+++ b/deps/v8/tools/gc-nvp-trace-processor.py
@@ -226,10 +226,6 @@ def scavenge_scope(r):
return r['pause'] - r['external']
return 0
-
-def real_mutator(r):
- return r['mutator'] - r['stepstook']
-
plots = [
[
Set('style fill solid 0.5 noborder'),
@@ -240,24 +236,7 @@ plots = [
Item('Sweep', 'sweep', lc = 'blue'),
Item('Compaction', 'compact', lc = 'red'),
Item('External', 'external', lc = '#489D43'),
- Item('Other', other_scope, lc = 'grey'),
- Item('IGC Steps', 'stepstook', lc = '#FF6347'))
- ],
- [
- Set('style fill solid 0.5 noborder'),
- Set('style histogram rowstacked'),
- Set('style data histograms'),
- Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
- Item('Marking', 'mark', lc = 'purple'),
- Item('Sweep', 'sweep', lc = 'blue'),
- Item('Compaction', 'compact', lc = 'red'),
- Item('External', 'external', lc = '#489D43'),
- Item('Other', other_scope, lc = '#ADD8E6'),
- Item('External', 'external', lc = '#D3D3D3'))
- ],
-
- [
- Plot(Item('Mutator', real_mutator, lc = 'black', style = 'lines'))
+ Item('Other', other_scope, lc = 'grey'))
],
[
Set('style histogram rowstacked'),
@@ -296,7 +275,7 @@ def freduce(f, field, trace, init):
return reduce(lambda t,r: f(t, r[field]), trace, init)
def calc_total(trace, field):
- return freduce(lambda t,v: t + long(v), field, trace, long(0))
+ return freduce(lambda t,v: t + v, field, trace, 0)
def calc_max(trace, field):
return freduce(lambda t,r: max(t, r), field, trace, 0)
@@ -311,8 +290,6 @@ def process_trace(filename):
marksweeps = filter(lambda r: r['gc'] == 'ms', trace)
markcompacts = filter(lambda r: r['gc'] == 'mc', trace)
scavenges = filter(lambda r: r['gc'] == 's', trace)
- globalgcs = filter(lambda r: r['gc'] != 's', trace)
-
charts = plot_all(plots, trace, filename)
@@ -325,7 +302,7 @@ def process_trace(filename):
else:
avg = 0
if n > 1:
- dev = math.sqrt(freduce(lambda t,r: t + (r - avg) ** 2, field, trace, 0) /
+ dev = math.sqrt(freduce(lambda t,r: (r - avg) ** 2, field, trace, 0) /
(n - 1))
else:
dev = 0
@@ -334,31 +311,6 @@ def process_trace(filename):
'<td>%d</td><td>%d [dev %f]</td></tr>' %
(prefix, n, total, max, avg, dev))
- def HumanReadable(size):
- suffixes = ['bytes', 'kB', 'MB', 'GB']
- power = 1
- for i in range(len(suffixes)):
- if size < power*1024:
- return "%.1f" % (float(size) / power) + " " + suffixes[i]
- power *= 1024
-
- def throughput(name, trace):
- total_live_after = calc_total(trace, 'total_size_after')
- total_live_before = calc_total(trace, 'total_size_before')
- total_gc = calc_total(trace, 'pause')
- if total_gc == 0:
- return
- out.write('GC %s Throughput (after): %s / %s ms = %s/ms<br/>' %
- (name,
- HumanReadable(total_live_after),
- total_gc,
- HumanReadable(total_live_after / total_gc)))
- out.write('GC %s Throughput (before): %s / %s ms = %s/ms<br/>' %
- (name,
- HumanReadable(total_live_before),
- total_gc,
- HumanReadable(total_live_before / total_gc)))
-
with open(filename + '.html', 'w') as out:
out.write('<html><body>')
@@ -377,11 +329,6 @@ def process_trace(filename):
filter(lambda r: r['external'] != 0, trace),
'external')
out.write('</table>')
- throughput('TOTAL', trace)
- throughput('MS', marksweeps)
- throughput('MC', markcompacts)
- throughput('OLDSPACE', globalgcs)
- out.write('<br/>')
for chart in charts:
out.write('<img src="%s">' % chart)
out.write('</body></html>')
diff --git a/deps/v8/tools/gcmole/gccause.lua b/deps/v8/tools/gcmole/gccause.lua
index b9891767de..a6fe542137 100644
--- a/deps/v8/tools/gcmole/gccause.lua
+++ b/deps/v8/tools/gcmole/gccause.lua
@@ -48,8 +48,6 @@ local function TrackCause(name, lvl)
T[f] = true
TrackCause(f, (lvl or 0) + 1)
end
-
- if f == '<GC>' then break end
end
end
end
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 4812930072..50144172a0 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -340,8 +340,6 @@
'../../src/ic-inl.h',
'../../src/ic.cc',
'../../src/ic.h',
- '../../src/incremental-marking.cc',
- '../../src/incremental-marking.h',
'../../src/inspector.cc',
'../../src/inspector.h',
'../../src/interpreter-irregexp.cc',
@@ -433,9 +431,6 @@
'../../src/spaces-inl.h',
'../../src/spaces.cc',
'../../src/spaces.h',
- '../../src/store-buffer-inl.h',
- '../../src/store-buffer.cc',
- '../../src/store-buffer.h',
'../../src/string-search.cc',
'../../src/string-search.h',
'../../src/string-stream.cc',
@@ -646,13 +641,6 @@
],
}
],
- ['OS=="solaris"', {
- 'sources': [
- '../../src/platform-solaris.cc',
- '../../src/platform-posix.cc',
- ],
- }
- ],
['OS=="mac"', {
'sources': [
'../../src/platform-macos.cc',
@@ -870,7 +858,7 @@
'targets': [
{
'target_name': 'v8',
- 'type': 'none',
+ 'type': 'settings',
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
diff --git a/deps/v8/tools/linux-tick-processor b/deps/v8/tools/linux-tick-processor
index 7070ce6fcc..0b0a1fbd27 100755
--- a/deps/v8/tools/linux-tick-processor
+++ b/deps/v8/tools/linux-tick-processor
@@ -1,14 +1,5 @@
#!/bin/sh
-# find the name of the log file to process, it must not start with a dash.
-log_file="v8.log"
-for arg in "$@"
-do
- if ! expr "X${arg}" : "^X-" > /dev/null; then
- log_file=${arg}
- fi
-done
-
tools_path=`cd $(dirname "$0");pwd`
if [ ! "$D8_PATH" ]; then
d8_public=`which d8`
@@ -18,20 +9,22 @@ fi
d8_exec=$D8_PATH/d8
if [ ! -x $d8_exec ]; then
- D8_PATH=`pwd`/out/native
- d8_exec=$D8_PATH/d8
-fi
-
-if [ ! -x $d8_exec ]; then
- d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
-fi
-
-if [ ! -x $d8_exec ]; then
echo "d8 shell not found in $D8_PATH"
- echo "To build, execute 'make native' from the V8 directory"
+ echo "To build, execute 'scons <flags> d8' from the V8 directory"
exit 1
fi
+
+# find the name of the log file to process, it must not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+ if ! expr "X${arg}" : "^X-" > /dev/null; then
+ log_file=${arg}
+ fi
+done
+
+
# nm spits out 'no symbols found' messages to stderr.
cat $log_file | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
$tools_path/csvparser.js $tools_path/consarray.js \
diff --git a/deps/v8/tools/ll_prof.py b/deps/v8/tools/ll_prof.py
index 30d10c3031..58cbb95851 100755
--- a/deps/v8/tools/ll_prof.py
+++ b/deps/v8/tools/ll_prof.py
@@ -399,16 +399,12 @@ class LogReader(object):
code = Code(name, start_address, end_address, origin, origin_offset)
conficting_code = self.code_map.Find(start_address)
if conficting_code:
- if not (conficting_code.start_address == code.start_address and
- conficting_code.end_address == code.end_address):
- self.code_map.Remove(conficting_code)
- else:
- LogReader._HandleCodeConflict(conficting_code, code)
- # TODO(vitalyr): this warning is too noisy because of our
- # attempts to reconstruct code log from the snapshot.
- # print >>sys.stderr, \
- # "Warning: Skipping duplicate code log entry %s" % code
- continue
+ LogReader._HandleCodeConflict(conficting_code, code)
+ # TODO(vitalyr): this warning is too noisy because of our
+ # attempts to reconstruct code log from the snapshot.
+ # print >>sys.stderr, \
+ # "Warning: Skipping duplicate code log entry %s" % code
+ continue
self.code_map.Add(code)
continue
diff --git a/deps/v8/tools/logreader.js b/deps/v8/tools/logreader.js
index a8141da21b..315e721276 100644
--- a/deps/v8/tools/logreader.js
+++ b/deps/v8/tools/logreader.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -134,8 +134,9 @@ LogReader.prototype.skipDispatch = function(dispatch) {
LogReader.prototype.dispatchLogRow_ = function(fields) {
// Obtain the dispatch.
var command = fields[0];
- if (!(command in this.dispatchTable_)) return;
-
+ if (!(command in this.dispatchTable_)) {
+ throw new Error('unknown command: ' + command);
+ }
var dispatch = this.dispatchTable_[command];
if (dispatch === null || this.skipDispatch(dispatch)) {
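
The logreader.js hunk above flips dispatchLogRow_ from silently skipping unknown
commands to raising an error. A stripped-down model of the resulting control
flow (the command lookup mirrors the diff; the handler invocation at the end is
schematic, not the real LogReader API):

    function dispatchLogRow(dispatchTable, fields) {
      var command = fields[0];
      if (!(command in dispatchTable)) {
        // Before this change unknown rows were dropped; now they are treated
        // as corrupt input and surfaced immediately.
        throw new Error('unknown command: ' + command);
      }
      var handler = dispatchTable[command];
      if (handler === null) return;      // commands mapped to null are intentionally ignored
      handler(fields.slice(1));          // schematic: pass the remaining fields on
    }
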
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 7af6e3d0d8..fda7ba96e5 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -211,12 +211,7 @@ class CppLintProcessor(SourceFileProcessor):
if exists(local_cpplint):
command = ['python', local_cpplint, '--filter', filt] + join(files)
- try:
- process = subprocess.Popen(command, stderr=subprocess.PIPE)
- except:
- print('Error running cpplint.py. Please make sure you have depot_tools' +
- ' in your $PATH. Lint check skipped.')
- return True
+ process = subprocess.Popen(command, stderr=subprocess.PIPE)
LINT_ERROR_PATTERN = re.compile(r'^(.+)[:(]\d+[:)]')
while True:
out_line = process.stderr.readline()
diff --git a/deps/v8/tools/push-to-trunk.sh b/deps/v8/tools/push-to-trunk.sh
index bd5d003cfd..761b733679 100755
--- a/deps/v8/tools/push-to-trunk.sh
+++ b/deps/v8/tools/push-to-trunk.sh
@@ -202,14 +202,10 @@ if [ $STEP -le 4 ] ; then
for commit in $COMMITS ; do
# Get the commit's title line.
git log -1 $commit --format="%w(80,8,8)%s" >> "$CHANGELOG_ENTRY_FILE"
- # Grep for "BUG=xxxx" lines in the commit message and convert them to
- # "(issue xxxx)".
- git log -1 $commit --format="%B" \
- | grep "^BUG=" | grep -v "BUG=$" \
- | sed -e 's/^/ /' \
- | sed -e 's/BUG=v8:\(.*\)$/(issue \1)/' \
- | sed -e 's/BUG=\(.*\)$/(Chromium issue \1)/' \
- >> "$CHANGELOG_ENTRY_FILE"
+ # Grep for "BUG=xxxx" lines in the commit message.
+ git log -1 $commit --format="%b" | grep BUG= | grep -v "BUG=$" \
+ | sed -e 's/^/ /' \
+ >> "$CHANGELOG_ENTRY_FILE"
# Append the commit's author for reference.
git log -1 $commit --format="%w(80,8,8)(%an)" >> "$CHANGELOG_ENTRY_FILE"
echo "" >> "$CHANGELOG_ENTRY_FILE"
diff --git a/deps/v8/tools/test-wrapper-gypbuild.py b/deps/v8/tools/test-wrapper-gypbuild.py
index a990b7ee59..ad5449a404 100755
--- a/deps/v8/tools/test-wrapper-gypbuild.py
+++ b/deps/v8/tools/test-wrapper-gypbuild.py
@@ -131,20 +131,16 @@ def BuildOptions():
def ProcessOptions(options):
- if options.arch_and_mode == ".":
- options.arch = []
- options.mode = []
- else:
- if options.arch_and_mode != None and options.arch_and_mode != "":
- tokens = options.arch_and_mode.split(".")
- options.arch = tokens[0]
- options.mode = tokens[1]
- options.mode = options.mode.split(',')
- options.arch = options.arch.split(',')
+ if options.arch_and_mode != None and options.arch_and_mode != "":
+ tokens = options.arch_and_mode.split(".")
+ options.arch = tokens[0]
+ options.mode = tokens[1]
+ options.mode = options.mode.split(',')
for mode in options.mode:
if not mode in ['debug', 'release']:
print "Unknown mode %s" % mode
return False
+ options.arch = options.arch.split(',')
for arch in options.arch:
if not arch in ['ia32', 'x64', 'arm']:
print "Unknown architecture %s" % arch
@@ -169,7 +165,7 @@ def PassOnOptions(options):
if options.snapshot:
result += ['--snapshot']
if options.special_command:
- result += ['--special-command="%s"' % options.special_command]
+ result += ['--special-command=' + options.special_command]
if options.valgrind:
result += ['--valgrind']
if options.cat:
@@ -236,18 +232,6 @@ def Main():
env=env)
returncodes += child.wait()
- if len(options.mode) == 0 and len(options.arch) == 0:
- print ">>> running tests"
- shellpath = workspace + '/' + options.outdir
- env['LD_LIBRARY_PATH'] = shellpath + '/lib.target'
- shell = shellpath + '/d8'
- child = subprocess.Popen(' '.join(args_for_children +
- ['--shell=' + shell]),
- shell=True,
- cwd=workspace,
- env=env)
- returncodes = child.wait()
-
return returncodes